diff --git a/hbase-annotations/pom.xml b/hbase-annotations/pom.xml index 0e1910b0fb2a..d8bcf07aeb74 100644 --- a/hbase-annotations/pom.xml +++ b/hbase-annotations/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 2.5.0-SNAPSHOT .. diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java index c2510efb026a..d9bae8490637 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ClientTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the client. This tests the hbase-client package and all of the client * tests in hbase-server. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java index 4341becbd68a..a168adec08af 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/CoprocessorTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to coprocessors. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java index a91033fa2d38..84f346baaea2 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FilterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java index 22fbc1b724ff..c23bfa298b36 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/FlakeyTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as failing commonly on public build infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java index c2375ca4e5cb..8eee0e6ae4b9 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IOTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and * the like. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java index 6bc712e270cf..4e555b73fedb 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/IntegrationTests.java @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as 'integration/system' test, meaning that the test class has the following * characteristics: * - * - * Integration / System tests should have a class name starting with "IntegrationTest", and - * should be annotated with @Category(IntegrationTests.class). Integration tests can be run - * using the IntegrationTestsDriver class or from mvn verify. - * + * Integration / System tests should have a class name starting with "IntegrationTest", and should + * be annotated with @Category(IntegrationTests.class). Integration tests can be run using the + * IntegrationTestsDriver class or from mvn verify. * @see SmallTests * @see MediumTests * @see LargeTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java index aa183d5607d7..b47e5bab9a46 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/LargeTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'large', means that the test class has the following characteristics: * - * * @see SmallTests * @see MediumTests * @see IntegrationTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java index 4b49da4e4dc0..0e68ab3c0340 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MapReduceTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to mapred or mapreduce. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java index e837f49a268a..5dcf51b27e59 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MasterTests.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as related to the master. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java index 0f8055b5bab0..d1f836ec0049 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MediumTests.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tagging a test as 'medium' means that the test class has the following characteristics: * - * - * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. - * + * Use it for tests that cannot be tagged as 'small'. Use it when you need to start up a cluster. 
  * @see SmallTests
  * @see LargeTests
  * @see IntegrationTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java
index 59962a74c280..27beaacf963e 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MetricsTests.java
@@ -15,7 +15,6 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java
index 2759bfc96df7..695042e801bf 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/MiscTests.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
  * Tag a test as not easily falling into any of the below categories.
- *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
  * @see org.apache.hadoop.hbase.testclassification.FilterTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java
index 4edb9bf031d2..929bd6487edf 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RPCTests.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
  * Tag a test as related to RPC.
- *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
  * @see org.apache.hadoop.hbase.testclassification.FilterTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java
index 0f03b761fcb1..3439afa76eba 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RegionServerTests.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
  * Tag a test as related to the regionserver.
- *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
  * @see org.apache.hadoop.hbase.testclassification.FilterTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java
index 8b8be4de8125..df606c960c25 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ReplicationTests.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
  * Tag a test as related to replication.
- *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
  * @see org.apache.hadoop.hbase.testclassification.FilterTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java
index e7d1d1d4c88c..a648b4c39e03 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/RestTests.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
  * Tag a test as related to the REST capability of HBase.
- *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
  * @see org.apache.hadoop.hbase.testclassification.FilterTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java
index 5263d467cbee..a4e55ad3aba0 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SecurityTests.java
@@ -15,12 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
 package org.apache.hadoop.hbase.testclassification;
 
 /**
  * Tag a test as related to security.
- *
  * @see org.apache.hadoop.hbase.testclassification.ClientTests
  * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests
  * @see org.apache.hadoop.hbase.testclassification.FilterTests
diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java
index 80e6c9d24209..64d2bce381b6 100644
--- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java
+++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/SmallTests.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,14 @@ /** * Tagging a test as 'small' means that the test class has the following characteristics: * - * * @see MediumTests * @see LargeTests * @see IntegrationTests */ -public interface SmallTests {} +public interface SmallTests { +} diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java index efc8d5ddc84c..d1f433b9719d 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowMapReduceTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** - * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build + * Tag a test as related to mapreduce and taking longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java index 85507de5ad4d..f556979e5b6a 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/VerySlowRegionServerTests.java @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.testclassification; /** * Tag a test as region tests which takes longer than 5 minutes to run on public build * infrastructure. - * * @see org.apache.hadoop.hbase.testclassification.ClientTests * @see org.apache.hadoop.hbase.testclassification.CoprocessorTests * @see org.apache.hadoop.hbase.testclassification.FilterTests diff --git a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java index 86aa6bdc85e6..9fa0579ed47e 100644 --- a/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java +++ b/hbase-annotations/src/test/java/org/apache/hadoop/hbase/testclassification/ZKTests.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.testclassification; /** diff --git a/hbase-archetypes/hbase-archetype-builder/pom.xml b/hbase-archetypes/hbase-archetype-builder/pom.xml index 8974db52aa25..7a8fb43fb89d 100644 --- a/hbase-archetypes/hbase-archetype-builder/pom.xml +++ b/hbase-archetypes/hbase-archetype-builder/pom.xml @@ -1,6 +1,5 @@ - - + + hbase-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${build.archetype.subdir} @@ -76,29 +75,30 @@ hbase-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-client.dir} - true + true + pom.xml - + hbase-shaded-client__copy-src-to-build-archetype-subdir - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${build.archetype.subdir} @@ -113,20 +113,21 @@ hbase-shaded-client__copy-pom-to-temp-for-xslt-processing - generate-resources copy-resources + generate-resources /${project.basedir}/../${hbase-shaded-client.dir}/${temp.exemplar.subdir} /${project.basedir}/../${hbase-shaded-client.dir} - true + true + pom.xml - + @@ -137,10 +138,10 @@ using xml-maven-plugin for xslt transformation, below. --> hbase-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-client.dir}/${temp.archetype.subdir} @@ -149,16 +150,16 @@ pom.xml - + hbase-shaded-client-ARCHETYPE__copy-pom-to-temp-for-xslt-processing - prepare-package copy-resources + prepare-package /${project.basedir}/../${hbase-shaded-client.dir}/${temp.archetype.subdir} @@ -167,7 +168,7 @@ pom.xml - + @@ -182,10 +183,10 @@ modify-exemplar-pom-files-via-xslt - process-resources transform + process-resources @@ -212,10 +213,10 @@ prevent warnings when project is generated from archetype. --> modify-archetype-pom-files-via-xslt - package transform + package @@ -242,32 +243,32 @@ - maven-antrun-plugin + maven-antrun-plugin make-scripts-executable - process-resources run + process-resources - - + + run-createArchetypes-script - compile run + compile - - - + + + run-installArchetypes-script - install run + install - - - + + + diff --git a/hbase-archetypes/hbase-client-project/pom.xml b/hbase-archetypes/hbase-client-project/pom.xml index 08630ae8ee5c..3bc71c10f59d 100644 --- a/hbase-archetypes/hbase-client-project/pom.xml +++ b/hbase-archetypes/hbase-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 2.5.0-SNAPSHOT .. diff --git a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java index 5164ab21716c..f9614e6d9f37 100644 --- a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,19 +37,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -61,21 +58,21 @@ public static void main(final String[] args) throws IOException { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. */ try (Connection connection = ConnectionFactory.createConnection(); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.getClusterStatus(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -93,9 +90,8 @@ public static void main(final String[] args) throws IOException { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. 
* @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -104,48 +100,38 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); TableDescriptor desc = TableDescriptorBuilder.newBuilder(MY_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(MY_COLUMN_FAMILY_NAME)).build(); admin.createTable(desc); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). * @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); - - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); + + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. 
- * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -153,38 +139,32 @@ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -195,28 +175,24 @@ static boolean namespaceExists(final Admin admin, final String namespaceName) /** * Invokes Table#delete to delete test data (i.e. the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. 
admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java index 554014e33f36..606504d4f951 100644 --- a/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java +++ b/hbase-archetypes/hbase-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/client/package-info.java @@ -1,25 +1,16 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** - * This package provides fully-functional exemplar Java code demonstrating - * simple usage of the hbase-client API, for incorporation into a Maven - * archetype with hbase-client dependency. + * This package provides fully-functional exemplar Java code demonstrating simple usage of the + * hbase-client API, for incorporation into a Maven archetype with hbase-client dependency. */ package org.apache.hbase.archetypes.exemplars.client; diff --git a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java index 9a92e606ffb0..2a5d58437087 100644 --- a/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,8 +46,7 @@ public class TestHelloHBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtility TEST_UTIL - = new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public void testNamespaceExists() throws Exception { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public void testCreateNamespaceAndTable() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public void testCreateNamespaceAndTable() throws Exception { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public void testPutRowToTable() throws IOException { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). 
- addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/hbase-shaded-client-project/pom.xml b/hbase-archetypes/hbase-shaded-client-project/pom.xml index 29f460506883..168f6b28df2a 100644 --- a/hbase-archetypes/hbase-shaded-client-project/pom.xml +++ b/hbase-archetypes/hbase-shaded-client-project/pom.xml @@ -1,8 +1,5 @@ - + 4.0.0 - hbase-archetypes org.apache.hbase + hbase-archetypes 2.5.0-SNAPSHOT .. @@ -44,16 +41,16 @@ org.apache.hbase hbase-testing-util test - - - javax.xml.bind - jaxb-api - - - javax.ws.rs - jsr311-api - - + + + javax.xml.bind + jaxb-api + + + javax.ws.rs + jsr311-api + + org.apache.hbase diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java index 94a1e711d47f..095fce4e6489 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/HelloHBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,19 +36,17 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Successful running of this application requires access to an active instance - * of HBase. For install instructions for a standalone instance of HBase, please - * refer to https://hbase.apache.org/book.html#quickstart + * Successful running of this application requires access to an active instance of HBase. For + * install instructions for a standalone instance of HBase, please refer to + * https://hbase.apache.org/book.html#quickstart */ public final class HelloHBase { protected static final String MY_NAMESPACE_NAME = "myTestNamespace"; static final TableName MY_TABLE_NAME = TableName.valueOf("myTestTable"); static final byte[] MY_COLUMN_FAMILY_NAME = Bytes.toBytes("cf"); - static final byte[] MY_FIRST_COLUMN_QUALIFIER - = Bytes.toBytes("myFirstColumn"); - static final byte[] MY_SECOND_COLUMN_QUALIFIER - = Bytes.toBytes("mySecondColumn"); + static final byte[] MY_FIRST_COLUMN_QUALIFIER = Bytes.toBytes("myFirstColumn"); + static final byte[] MY_SECOND_COLUMN_QUALIFIER = Bytes.toBytes("mySecondColumn"); static final byte[] MY_ROW_ID = Bytes.toBytes("rowId01"); // Private constructor included here to avoid checkstyle warnings @@ -60,21 +57,21 @@ public static void main(final String[] args) throws IOException { final boolean deleteAllAtEOJ = true; /** - * ConnectionFactory#createConnection() automatically looks for - * hbase-site.xml (HBase configuration parameters) on the system's - * CLASSPATH, to enable creation of Connection to HBase via ZooKeeper. + * ConnectionFactory#createConnection() automatically looks for hbase-site.xml (HBase + * configuration parameters) on the system's CLASSPATH, to enable creation of Connection to + * HBase via ZooKeeper. 
*/ try (Connection connection = ConnectionFactory.createConnection(); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { admin.getClusterStatus(); // assure connection successfully established - System.out.println("\n*** Hello HBase! -- Connection has been " - + "established via ZooKeeper!!\n"); + System.out + .println("\n*** Hello HBase! -- Connection has been " + "established via ZooKeeper!!\n"); createNamespaceAndTable(admin); System.out.println("Getting a Table object for [" + MY_TABLE_NAME - + "] with which to perform CRUD operations in HBase."); + + "] with which to perform CRUD operations in HBase."); try (Table table = connection.getTable(MY_TABLE_NAME)) { putRowToTable(table); @@ -92,9 +89,8 @@ public static void main(final String[] args) throws IOException { } /** - * Invokes Admin#createNamespace and Admin#createTable to create a namespace - * with a table that has one column-family. - * + * Invokes Admin#createNamespace and Admin#createTable to create a namespace with a table that has + * one column-family. * @param admin Standard Admin object * @throws IOException If IO problem encountered */ @@ -103,47 +99,38 @@ static void createNamespaceAndTable(final Admin admin) throws IOException { if (!namespaceExists(admin, MY_NAMESPACE_NAME)) { System.out.println("Creating Namespace [" + MY_NAMESPACE_NAME + "]."); - admin.createNamespace(NamespaceDescriptor - .create(MY_NAMESPACE_NAME).build()); + admin.createNamespace(NamespaceDescriptor.create(MY_NAMESPACE_NAME).build()); } if (!admin.tableExists(MY_TABLE_NAME)) { System.out.println("Creating Table [" + MY_TABLE_NAME.getNameAsString() - + "], with one Column Family [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); + + "], with one Column Family [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + "]."); admin.createTable(new HTableDescriptor(MY_TABLE_NAME) - .addFamily(new HColumnDescriptor(MY_COLUMN_FAMILY_NAME))); + .addFamily(new HColumnDescriptor(MY_COLUMN_FAMILY_NAME))); } } /** - * Invokes Table#put to store a row (with two new columns created 'on the - * fly') into the table. - * + * Invokes Table#put to store a row (with two new columns created 'on the fly') into the table. * @param table Standard Table object (used for CRUD operations). 
* @throws IOException If IO problem encountered */ static void putRowToTable(final Table table) throws IOException { - table.put(new Put(MY_ROW_ID).addColumn(MY_COLUMN_FAMILY_NAME, - MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("Hello")).addColumn(MY_COLUMN_FAMILY_NAME, - MY_SECOND_COLUMN_QUALIFIER, - Bytes.toBytes("World!"))); - - System.out.println("Row [" + Bytes.toString(MY_ROW_ID) - + "] was put into Table [" - + table.getName().getNameAsString() + "] in HBase;\n" - + " the row's two columns (created 'on the fly') are: [" - + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) - + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" - + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); + table.put(new Put(MY_ROW_ID) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("Hello")) + .addColumn(MY_COLUMN_FAMILY_NAME, MY_SECOND_COLUMN_QUALIFIER, Bytes.toBytes("World!"))); + + System.out.println("Row [" + Bytes.toString(MY_ROW_ID) + "] was put into Table [" + + table.getName().getNameAsString() + "] in HBase;\n" + + " the row's two columns (created 'on the fly') are: [" + + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + Bytes.toString(MY_FIRST_COLUMN_QUALIFIER) + + "] and [" + Bytes.toString(MY_COLUMN_FAMILY_NAME) + ":" + + Bytes.toString(MY_SECOND_COLUMN_QUALIFIER) + "]"); } /** * Invokes Table#get and prints out the contents of the retrieved row. - * * @param table Standard Table object * @throws IOException If IO problem encountered */ @@ -151,38 +138,32 @@ static void getAndPrintRowContents(final Table table) throws IOException { Result row = table.get(new Get(MY_ROW_ID)); - System.out.println("Row [" + Bytes.toString(row.getRow()) - + "] was retrieved from Table [" - + table.getName().getNameAsString() - + "] in HBase, with the following content:"); + System.out.println("Row [" + Bytes.toString(row.getRow()) + "] was retrieved from Table [" + + table.getName().getNameAsString() + "] in HBase, with the following content:"); - for (Entry> colFamilyEntry - : row.getNoVersionMap().entrySet()) { + for (Entry> colFamilyEntry : row.getNoVersionMap() + .entrySet()) { String columnFamilyName = Bytes.toString(colFamilyEntry.getKey()); - System.out.println(" Columns in Column Family [" + columnFamilyName - + "]:"); + System.out.println(" Columns in Column Family [" + columnFamilyName + "]:"); - for (Entry columnNameAndValueMap - : colFamilyEntry.getValue().entrySet()) { + for (Entry columnNameAndValueMap : colFamilyEntry.getValue().entrySet()) { System.out.println(" Value of Column [" + columnFamilyName + ":" - + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " - + Bytes.toString(columnNameAndValueMap.getValue())); + + Bytes.toString(columnNameAndValueMap.getKey()) + "] == " + + Bytes.toString(columnNameAndValueMap.getValue())); } } } /** * Checks to see whether a namespace exists. - * * @param admin Standard Admin object * @param namespaceName Name of namespace * @return true If namespace exists * @throws IOException If IO problem encountered */ - static boolean namespaceExists(final Admin admin, final String namespaceName) - throws IOException { + static boolean namespaceExists(final Admin admin, final String namespaceName) throws IOException { try { admin.getNamespaceDescriptor(namespaceName); } catch (NamespaceNotFoundException e) { @@ -193,28 +174,24 @@ static boolean namespaceExists(final Admin admin, final String namespaceName) /** * Invokes Table#delete to delete test data (i.e. 
the row) - * * @param table Standard Table object * @throws IOException If IO problem is encountered */ static void deleteRow(final Table table) throws IOException { - System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) - + "] from Table [" - + table.getName().getNameAsString() + "]."); + System.out.println("Deleting row [" + Bytes.toString(MY_ROW_ID) + "] from Table [" + + table.getName().getNameAsString() + "]."); table.delete(new Delete(MY_ROW_ID)); } /** - * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to - * disable/delete Table and delete Namespace. - * + * Invokes Admin#disableTable, Admin#deleteTable, and Admin#deleteNamespace to disable/delete + * Table and delete Namespace. * @param admin Standard Admin object * @throws IOException If IO problem is encountered */ static void deleteNamespaceAndTable(final Admin admin) throws IOException { if (admin.tableExists(MY_TABLE_NAME)) { - System.out.println("Disabling/deleting Table [" - + MY_TABLE_NAME.getNameAsString() + "]."); + System.out.println("Disabling/deleting Table [" + MY_TABLE_NAME.getNameAsString() + "]."); admin.disableTable(MY_TABLE_NAME); // Disable a table before deleting it. admin.deleteTable(MY_TABLE_NAME); } diff --git a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java index 754be16069c6..3181ddb6d30d 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/main/java/org/apache/hbase/archetypes/exemplars/shaded_client/package-info.java @@ -1,25 +1,16 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. 
*/ /** - * This package provides fully-functional exemplar Java code demonstrating - * simple usage of the hbase-client API, for incorporation into a Maven - * archetype with hbase-shaded-client dependency. + * This package provides fully-functional exemplar Java code demonstrating simple usage of the + * hbase-client API, for incorporation into a Maven archetype with hbase-shaded-client dependency. */ package org.apache.hbase.archetypes.exemplars.shaded_client; diff --git a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java index 0f0f7d91ade4..3ff1396a52ae 100644 --- a/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java +++ b/hbase-archetypes/hbase-shaded-client-project/src/test/java/org/apache/hbase/archetypes/exemplars/shaded_client/TestHelloHBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,8 +46,7 @@ public class TestHelloHBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHelloHBase.class); - private static final HBaseTestingUtility TEST_UTIL - = new HBaseTestingUtility(); + private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @BeforeClass public static void beforeClass() throws Exception { @@ -67,13 +66,11 @@ public void testNamespaceExists() throws Exception { Admin admin = TEST_UTIL.getAdmin(); exists = HelloHBase.namespaceExists(admin, NONEXISTENT_NAMESPACE); - assertEquals("#namespaceExists failed: found nonexistent namespace.", - false, exists); + assertEquals("#namespaceExists failed: found nonexistent namespace.", false, exists); admin.createNamespace(NamespaceDescriptor.create(EXISTING_NAMESPACE).build()); exists = HelloHBase.namespaceExists(admin, EXISTING_NAMESPACE); - assertEquals("#namespaceExists failed: did NOT find existing namespace.", - true, exists); + assertEquals("#namespaceExists failed: did NOT find existing namespace.", true, exists); admin.deleteNamespace(EXISTING_NAMESPACE); } @@ -82,14 +79,11 @@ public void testCreateNamespaceAndTable() throws Exception { Admin admin = TEST_UTIL.getAdmin(); HelloHBase.createNamespaceAndTable(admin); - boolean namespaceExists - = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); - assertEquals("#createNamespaceAndTable failed to create namespace.", - true, namespaceExists); + boolean namespaceExists = HelloHBase.namespaceExists(admin, HelloHBase.MY_NAMESPACE_NAME); + assertEquals("#createNamespaceAndTable failed to create namespace.", true, namespaceExists); boolean tableExists = admin.tableExists(HelloHBase.MY_TABLE_NAME); - assertEquals("#createNamespaceAndTable failed to create table.", - true, tableExists); + assertEquals("#createNamespaceAndTable failed to create table.", true, tableExists); admin.disableTable(HelloHBase.MY_TABLE_NAME); admin.deleteTable(HelloHBase.MY_TABLE_NAME); @@ -100,8 +94,7 @@ public void testCreateNamespaceAndTable() throws Exception { public void testPutRowToTable() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = 
TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); HelloHBase.putRowToTable(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); @@ -115,13 +108,10 @@ public void testPutRowToTable() throws IOException { public void testDeleteRow() throws IOException { Admin admin = TEST_UTIL.getAdmin(); admin.createNamespace(NamespaceDescriptor.create(HelloHBase.MY_NAMESPACE_NAME).build()); - Table table - = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); + Table table = TEST_UTIL.createTable(HelloHBase.MY_TABLE_NAME, HelloHBase.MY_COLUMN_FAMILY_NAME); - table.put(new Put(HelloHBase.MY_ROW_ID). - addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, - HelloHBase.MY_FIRST_COLUMN_QUALIFIER, - Bytes.toBytes("xyz"))); + table.put(new Put(HelloHBase.MY_ROW_ID).addColumn(HelloHBase.MY_COLUMN_FAMILY_NAME, + HelloHBase.MY_FIRST_COLUMN_QUALIFIER, Bytes.toBytes("xyz"))); HelloHBase.deleteRow(table); Result row = table.get(new Get(HelloHBase.MY_ROW_ID)); assertEquals("#deleteRow failed to delete row.", true, row.isEmpty()); diff --git a/hbase-archetypes/pom.xml b/hbase-archetypes/pom.xml index 9cdd4cff599e..87e2f72ddb70 100644 --- a/hbase-archetypes/pom.xml +++ b/hbase-archetypes/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -68,10 +67,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-assembly/pom.xml b/hbase-assembly/pom.xml index 796f443e9b3d..dee3c395f390 100644 --- a/hbase-assembly/pom.xml +++ b/hbase-assembly/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-assembly - Apache HBase - Assembly - - Module that does project assembly and that is all that it does. - pom + Apache HBase - Assembly + Module that does project assembly and that is all that it does. 
true - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - aggregate-licenses - - process - - - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - maven-assembly-plugin - - - hbase-${project.version} - false - true - posix - - ${assembly.file} - src/main/assembly/client.xml - - - - - maven-dependency-plugin - - - - create-hbase-generated-classpath - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath.txt - jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce - - - - - - create-hbase-generated-classpath-jline - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jline.txt - jline - - - - - - create-hbase-generated-classpath-jruby - test - - build-classpath - - - ${project.parent.basedir}/target/cached_classpath_jruby.txt - jruby-complete - - - - - - - unpack-dependency-notices - prepare-package - - unpack-dependencies - - - pom - true - **\/NOTICE,**\/NOTICE.txt - - - - - - org.codehaus.mojo - exec-maven-plugin - ${exec.maven.version} - - - concat-NOTICE-files - package - - exec - - - env - - bash - -c - cat maven-shared-archive-resources/META-INF/NOTICE \ - `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` - - - ${project.build.directory}/NOTICE.aggregate - ${project.build.directory} - - - - - - - @@ -189,7 +47,7 @@ org.apache.hbase hbase-shaded-mapreduce - + org.apache.hbase hbase-it @@ -258,16 +116,16 @@ hbase-external-blockcache - org.apache.hbase - hbase-testing-util + org.apache.hbase + hbase-testing-util - org.apache.hbase - hbase-metrics-api + org.apache.hbase + hbase-metrics-api - org.apache.hbase - hbase-metrics + org.apache.hbase + hbase-metrics org.apache.hbase @@ -278,9 +136,9 @@ hbase-protocol-shaded - org.apache.hbase - hbase-resource-bundle - true + org.apache.hbase + hbase-resource-bundle + true org.apache.httpcomponents @@ -375,12 +233,151 @@ log4j-1.2-api + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + aggregate-licenses + + process + + + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + maven-assembly-plugin + + + hbase-${project.version} + false + true + posix + + ${assembly.file} + src/main/assembly/client.xml + + + + + maven-dependency-plugin + + + + create-hbase-generated-classpath + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath.txt + jline,jruby-complete,hbase-shaded-client,hbase-shaded-client-byo-hadoop,hbase-shaded-mapreduce + + + + + + create-hbase-generated-classpath-jline + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jline.txt + jline + + + + + + create-hbase-generated-classpath-jruby + + build-classpath + + test + + ${project.parent.basedir}/target/cached_classpath_jruby.txt + jruby-complete + + + + + + + unpack-dependency-notices + + unpack-dependencies + + prepare-package + + pom + true 
+ **\/NOTICE,**\/NOTICE.txt + + + + + + org.codehaus.mojo + exec-maven-plugin + ${exec.maven.version} + + + concat-NOTICE-files + + exec + + package + + env + + bash + -c + cat maven-shared-archive-resources/META-INF/NOTICE \ + `find ${project.build.directory}/dependency -iname NOTICE -or -iname NOTICE.txt` + + ${project.build.directory}/NOTICE.aggregate + ${project.build.directory} + + + + + + + rsgroup - !skip-rsgroup + !skip-rsgroup @@ -388,18 +385,18 @@ org.apache.hbase hbase-rsgroup - - junit - junit - - - org.mockito - mockito-core - - - compile - - + + junit + junit + + + org.mockito + mockito-core + + + compile + + diff --git a/hbase-asyncfs/pom.xml b/hbase-asyncfs/pom.xml index 99a325d26e8a..4eb26471cebb 100644 --- a/hbase-asyncfs/pom.xml +++ b/hbase-asyncfs/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,33 +30,6 @@ hbase-asyncfs Apache HBase - Asynchronous FileSystem HBase Asynchronous FileSystem Implementation for WAL - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -169,6 +141,33 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -176,8 +175,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -265,8 +265,7 @@ lifecycle-mapping - - + diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java index 059ca00b02cc..b88b32bdb814 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,9 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.util.concurrent.CompletableFuture; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.CancelableProgressable; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface for asynchronous filesystem output stream. diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java index 5b713196d0b0..4ff903676f09 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java index f618158fdb37..d3735edd8897 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +22,9 @@ import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.completeFile; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.endFileLease; import static org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.getStatus; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.READER_IDLE; import static org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleState.WRITER_IDLE; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import com.google.errorprone.annotations.RestrictedApi; import java.io.IOException; @@ -41,7 +41,6 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.Encryptor; import org.apache.hadoop.fs.Path; @@ -146,7 +145,7 @@ private static final class Callback { private long lastAckTimestamp = -1; public Callback(CompletableFuture future, long ackedLength, - final Collection replicas, long packetDataLen) { + final Collection replicas, long packetDataLen) { this.future = future; this.ackedLength = ackedLength; this.packetDataLen = packetDataLen; @@ -155,7 +154,7 @@ public Callback(CompletableFuture future, long ackedLength, this.unfinishedReplicas = Collections.emptySet(); } else { this.unfinishedReplicas = - Collections.newSetFromMap(new ConcurrentHashMap(replicas.size())); + Collections.newSetFromMap(new ConcurrentHashMap(replicas.size())); replicas.stream().map(Channel::id).forEachOrdered(unfinishedReplicas::add); } } @@ -197,7 +196,7 @@ private void completed(Channel channel) { if (c.unfinishedReplicas.remove(channel.id())) { long current = EnvironmentEdgeManager.currentTime(); streamSlowMonitor.checkProcessTimeAndSpeed(datanodeInfoMap.get(channel), c.packetDataLen, - current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); + current - c.flushTimestamp, c.lastAckTimestamp, c.unfinishedReplicas.size()); c.lastAckTimestamp = current; if (c.unfinishedReplicas.isEmpty()) { // we need to remove first before complete the future. 
It is possible that after we @@ -285,13 +284,13 @@ public AckHandler(int timeoutMs) { protected void channelRead0(ChannelHandlerContext ctx, PipelineAckProto ack) throws Exception { Status reply = getStatus(ack); if (reply != Status.SUCCESS) { - failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Bad response " + reply + " for block " + block + + " from datanode " + ctx.channel().remoteAddress())); return; } if (PipelineAck.isRestartOOBStatus(reply)) { - failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + - block + " from datanode " + ctx.channel().remoteAddress())); + failed(ctx.channel(), () -> new IOException("Restart response " + reply + " for block " + + block + " from datanode " + ctx.channel().remoteAddress())); return; } if (ack.getSeqno() == HEART_BEAT_SEQNO) { @@ -346,8 +345,8 @@ private void setupReceiver(int timeoutMs) { } } - FanOutOneBlockAsyncDFSOutput(Configuration conf,DistributedFileSystem dfs, - DFSClient client, ClientProtocol namenode, String clientName, String src, long fileId, + FanOutOneBlockAsyncDFSOutput(Configuration conf, DistributedFileSystem dfs, DFSClient client, + ClientProtocol namenode, String clientName, String src, long fileId, LocatedBlock locatedBlock, Encryptor encryptor, Map datanodeInfoMap, DataChecksum summer, ByteBufAllocator alloc, StreamSlowMonitor streamSlowMonitor) { this.conf = conf; @@ -419,8 +418,8 @@ private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, ByteBuf headerBuf = alloc.buffer(headerLen); header.putInBuffer(headerBuf.nioBuffer(0, headerLen)); headerBuf.writerIndex(headerLen); - Callback c = new Callback(future, nextPacketOffsetInBlock + dataLen, - datanodeInfoMap.keySet(), dataLen); + Callback c = + new Callback(future, nextPacketOffsetInBlock + dataLen, datanodeInfoMap.keySet(), dataLen); waitingAckQueue.addLast(c); // recheck again after we pushed the callback to queue if (state != State.STREAMING && waitingAckQueue.peekFirst() == c) { @@ -430,7 +429,7 @@ private void flushBuffer(CompletableFuture future, ByteBuf dataBuf, return; } // TODO: we should perhaps measure time taken per DN here; - // we could collect statistics per DN, and/or exclude bad nodes in createOutput. + // we could collect statistics per DN, and/or exclude bad nodes in createOutput. datanodeInfoMap.keySet().forEach(ch -> { ch.write(headerBuf.retainedDuplicate()); ch.write(checksumBuf.retainedDuplicate()); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java index 45ff1cb145f9..e51e1f5a7756 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
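/*
 * Illustrative sketch only (not part of the patch): the ack bookkeeping that Callback performs
 * above, reduced to plain JDK types. Channel ids are stood in for by Strings here; the real class
 * keys the set by netty ChannelId and completes the future from completed(Channel).
 */
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

class PendingAckSketch {
  private final CompletableFuture<Long> future = new CompletableFuture<>();
  private final Set<String> unfinishedReplicas;
  private final long ackedLength;

  PendingAckSketch(List<String> replicaIds, long ackedLength) {
    this.ackedLength = ackedLength;
    // Same trick as Callback: a concurrent Set view backed by a ConcurrentHashMap.
    this.unfinishedReplicas =
      Collections.newSetFromMap(new ConcurrentHashMap<>(replicaIds.size()));
    this.unfinishedReplicas.addAll(replicaIds);
  }

  /** One call per replica ack; the last outstanding replica completes the future. */
  void ack(String replicaId) {
    if (unfinishedReplicas.remove(replicaId) && unfinishedReplicas.isEmpty()) {
      future.complete(ackedLength);
    }
  }

  CompletableFuture<Long> future() {
    return future;
  }
}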
See the NOTICE file * distributed with this work for additional information @@ -227,13 +227,13 @@ public void end(DFSClient client, long inodeId) { private static FileCreator createFileCreator3_3() throws NoSuchMethodException { Method createMethod = ClientProtocol.class.getMethod("create", String.class, FsPermission.class, - String.class, EnumSetWritable.class, boolean.class, short.class, long.class, - CryptoProtocolVersion[].class, String.class, String.class); + String.class, EnumSetWritable.class, boolean.class, short.class, long.class, + CryptoProtocolVersion[].class, String.class, String.class); return (instance, src, masked, clientName, flag, createParent, replication, blockSize, supportedVersions) -> { return (HdfsFileStatus) createMethod.invoke(instance, src, masked, clientName, flag, - createParent, replication, blockSize, supportedVersions, null, null); + createParent, replication, blockSize, supportedVersions, null, null); }; } @@ -307,9 +307,9 @@ public boolean progress() { FILE_CREATOR = createFileCreator(); SHOULD_REPLICATE_FLAG = loadShouldReplicateFlag(); } catch (Exception e) { - String msg = "Couldn't properly initialize access to HDFS internals. Please " + - "update your WAL Provider to not make use of the 'asyncfs' provider. See " + - "HBASE-16110 for more information."; + String msg = "Couldn't properly initialize access to HDFS internals. Please " + + "update your WAL Provider to not make use of the 'asyncfs' provider. See " + + "HBASE-16110 for more information."; LOG.error(msg, e); throw new Error(msg, e); } @@ -356,11 +356,11 @@ protected void channelRead0(ChannelHandlerContext ctx, BlockOpResponseProto resp String logInfo = "ack with firstBadLink as " + resp.getFirstBadLink(); if (resp.getStatus() != Status.SUCCESS) { if (resp.getStatus() == Status.ERROR_ACCESS_TOKEN) { - throw new InvalidBlockTokenException("Got access token error" + ", status message " + - resp.getMessage() + ", " + logInfo); + throw new InvalidBlockTokenException("Got access token error" + ", status message " + + resp.getMessage() + ", " + logInfo); } else { - throw new IOException("Got error" + ", status=" + resp.getStatus().name() + - ", status message " + resp.getMessage() + ", " + logInfo); + throw new IOException("Got error" + ", status=" + resp.getStatus().name() + + ", status message " + resp.getMessage() + ", " + logInfo); } } // success @@ -402,11 +402,11 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E private static void requestWriteBlock(Channel channel, StorageType storageType, OpWriteBlockProto.Builder writeBlockProtoBuilder) throws IOException { - OpWriteBlockProto proto = - writeBlockProtoBuilder.setStorageType(PBHelperClient.convertStorageType(storageType)).build(); + OpWriteBlockProto proto = writeBlockProtoBuilder + .setStorageType(PBHelperClient.convertStorageType(storageType)).build(); int protoLen = proto.getSerializedSize(); ByteBuf buffer = - channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen); + channel.alloc().buffer(3 + CodedOutputStream.computeRawVarint32Size(protoLen) + protoLen); buffer.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION); buffer.writeByte(Op.WRITE_BLOCK.code); proto.writeDelimitedTo(new ByteBufOutputStream(buffer)); @@ -446,9 +446,9 @@ private static List> connectToDataNodes(Configuration conf, DFSC ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock()); blockCopy.setNumBytes(locatedBlock.getBlockSize()); ClientOperationHeaderProto header = 
ClientOperationHeaderProto.newBuilder() - .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy)) - .setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) - .setClientName(clientName).build(); + .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy)) + .setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))) + .setClientName(clientName).build(); ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer); OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder() .setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())) @@ -524,10 +524,10 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES); ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager(); Set toExcludeNodes = - new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); + new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet()); for (int retry = 0;; retry++) { LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, - toExcludeNodes, retry); + toExcludeNodes, retry); HdfsFileStatus stat; try { stat = FILE_CREATOR.create(namenode, src, @@ -565,8 +565,8 @@ private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem d } Encryptor encryptor = createEncryptor(conf, stat, client); FanOutOneBlockAsyncDFSOutput output = - new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src, - stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor); + new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src, + stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor); succ = true; return output; } catch (RemoteException e) { diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java index 090b9b4a63f1..112a88a45e50 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/FanOutOneBlockAsyncDFSOutputSaslHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
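/*
 * A minimal sketch of driving the helper above, mirroring the usage in
 * TestFanOutOneBlockAsyncDFSOutput later in this patch. FS, EVENT_LOOP_GROUP and CHANNEL_CLASS
 * are assumed to be set up as in that test; the path and monitor name are illustrative only.
 */
Path f = new Path("/example");
EventLoop eventLoop = EVENT_LOOP_GROUP.next();
StreamSlowMonitor monitor =
  new ExcludeDatanodeManager(HBaseConfiguration.create()).getStreamSlowMonitor("example");
try (FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f,
    true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, monitor)) {
  // Write and flush as writeAndVerify(...) does in the tests; the stream writes each packet to
  // every pipeline datanode itself instead of relying on DataNode-to-DataNode chaining.
}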
See the NOTICE file * distributed with this work for additional information @@ -179,7 +179,7 @@ public AtomicBoolean getFallbackToSimpleAuth(SaslDataTransferClient saslClient) private static TransparentCryptoHelper createTransparentCryptoHelperWithoutHDFS12396() throws NoSuchMethodException { Method decryptEncryptedDataEncryptionKeyMethod = DFSClient.class - .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); + .getDeclaredMethod("decryptEncryptedDataEncryptionKey", FileEncryptionInfo.class); decryptEncryptedDataEncryptionKeyMethod.setAccessible(true); return new TransparentCryptoHelper() { @@ -188,7 +188,7 @@ public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) throws IOException { try { KeyVersion decryptedKey = - (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); + (KeyVersion) decryptEncryptedDataEncryptionKeyMethod.invoke(client, feInfo); CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite()); Encryptor encryptor = cryptoCodec.createEncryptor(); encryptor.init(decryptedKey.getMaterial(), feInfo.getIV()); @@ -218,7 +218,7 @@ public Encryptor createEncryptor(Configuration conf, FileEncryptionInfo feInfo, DFSClient client) throws IOException { try { KeyVersion decryptedKey = (KeyVersion) decryptEncryptedDataEncryptionKeyMethod - .invoke(null, feInfo, client.getKeyProvider()); + .invoke(null, feInfo, client.getKeyProvider()); CryptoCodec cryptoCodec = CryptoCodec.getInstance(conf, feInfo.getCipherSuite()); Encryptor encryptor = cryptoCodec.createEncryptor(); encryptor.init(decryptedKey.getMaterial(), feInfo.getIV()); @@ -240,8 +240,9 @@ private static TransparentCryptoHelper createTransparentCryptoHelper() try { return createTransparentCryptoHelperWithoutHDFS12396(); } catch (NoSuchMethodException e) { - LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + - " should be hadoop version with HDFS-12396", e); + LOG.debug("No decryptEncryptedDataEncryptionKey method in DFSClient," + + " should be hadoop version with HDFS-12396", + e); } return createTransparentCryptoHelperWithHDFS12396(); } @@ -324,8 +325,8 @@ private static final class SaslNegotiateHandler extends ChannelDuplexHandler { private int step = 0; public SaslNegotiateHandler(Configuration conf, String username, char[] password, - Map saslProps, int timeoutMs, Promise promise, - DFSClient dfsClient) throws SaslException { + Map saslProps, int timeoutMs, Promise promise, DFSClient dfsClient) + throws SaslException { this.conf = conf; this.saslProps = saslProps; this.saslClient = Sasl.createSaslClient(new String[] { MECHANISM }, username, PROTOCOL, @@ -355,8 +356,8 @@ private List getCipherOptions() throws IOException { } /** - * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. - * After Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. + * The asyncfs subsystem emulates a HDFS client by sending protobuf messages via netty. After + * Hadoop 3.3.0, the protobuf classes are relocated to org.apache.hadoop.thirdparty.protobuf.*. * Use Reflection to check which ones to use. */ private static class BuilderPayloadSetter { @@ -366,13 +367,12 @@ private static class BuilderPayloadSetter { /** * Create a ByteString from byte array without copying (wrap), and then set it as the payload * for the builder. - * * @param builder builder for HDFS DataTransferEncryptorMessage. * @param payload byte array of payload. 
* @throws IOException */ - static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, byte[] payload) - throws IOException { + static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, + byte[] payload) throws IOException { Object byteStringObject; try { // byteStringObject = new LiteralByteString(payload); @@ -396,18 +396,19 @@ static void wrapAndSetPayload(DataTransferEncryptorMessageProto.Builder builder, try { // See if it can load the relocated ByteString, which comes from hadoop-thirdparty. byteStringClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString"); - LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + - " Assuming this is Hadoop 3.3.0+."); + LOG.debug("Found relocated ByteString class from hadoop-thirdparty." + + " Assuming this is Hadoop 3.3.0+."); } catch (ClassNotFoundException e) { - LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + - " Assuming this is below Hadoop 3.3.0", e); + LOG.debug("Did not find relocated ByteString class from hadoop-thirdparty." + + " Assuming this is below Hadoop 3.3.0", + e); } // LiteralByteString is a package private class in protobuf. Make it accessible. Class literalByteStringClass; try { - literalByteStringClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); + literalByteStringClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.ByteString$LiteralByteString"); LOG.debug("Shaded LiteralByteString from hadoop-thirdparty is found."); } catch (ClassNotFoundException e) { try { @@ -805,8 +806,7 @@ static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo d } doSaslNegotiation(conf, channel, timeoutMs, getUserNameFromEncryptionKey(encryptionKey), encryptionKeyToPassword(encryptionKey.encryptionKey), - createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, - client); + createSaslPropertiesForEncryption(encryptionKey.encryptionAlgorithm), saslPromise, client); } else if (!UserGroupInformation.isSecurityEnabled()) { if (LOG.isDebugEnabled()) { LOG.debug("SASL client skipping handshake in unsecured configuration for addr = " + addr @@ -832,7 +832,7 @@ static void trySaslNegotiate(Configuration conf, Channel channel, DatanodeInfo d } doSaslNegotiation(conf, channel, timeoutMs, buildUsername(accessToken), buildClientPassword(accessToken), saslPropsResolver.getClientProperties(addr), saslPromise, - client); + client); } else { // It's a secured cluster using non-privileged ports, but no SASL. The only way this can // happen is if the DataNode has ignore.secure.ports.for.testing configured, so this is a rare diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java index 3be9a2e49c1b..65a43bacdfe8 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/ProtobufDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,33 +17,29 @@ */ package org.apache.hadoop.hbase.io.asyncfs; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufUtil; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.handler.codec.MessageToMessageDecoder; import org.apache.hbase.thirdparty.io.netty.util.internal.ObjectUtil; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.List; /** - * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. - * The Netty's ProtobufDecode supports unshaded protobuf messages (com.google.protobuf). - * - * Hadoop 3.3.0 and above relocates protobuf classes to a shaded jar (hadoop-thirdparty), and - * so we must use reflection to detect which one (relocated or not) to use. - * - * Do not use this to process HBase's shaded protobuf messages. This is meant to process the - * protobuf messages in HDFS for the asyncfs use case. - * */ + * Modified based on io.netty.handler.codec.protobuf.ProtobufDecoder. The Netty's ProtobufDecode + * supports unshaded protobuf messages (com.google.protobuf). Hadoop 3.3.0 and above relocates + * protobuf classes to a shaded jar (hadoop-thirdparty), and so we must use reflection to detect + * which one (relocated or not) to use. Do not use this to process HBase's shaded protobuf messages. + * This is meant to process the protobuf messages in HDFS for the asyncfs use case. 
+ */ @InterfaceAudience.Private public class ProtobufDecoder extends MessageToMessageDecoder { - private static final Logger LOG = - LoggerFactory.getLogger(ProtobufDecoder.class); + private static final Logger LOG = LoggerFactory.getLogger(ProtobufDecoder.class); private static Class protobufMessageLiteClass = null; private static Class protobufMessageLiteBuilderClass = null; @@ -60,23 +56,22 @@ public class ProtobufDecoder extends MessageToMessageDecoder { private Object parser; private Object builder; - public ProtobufDecoder(Object prototype) { try { - Method getDefaultInstanceForTypeMethod = protobufMessageLiteClass.getMethod( - "getDefaultInstanceForType"); - Object prototype1 = getDefaultInstanceForTypeMethod - .invoke(ObjectUtil.checkNotNull(prototype, "prototype")); + Method getDefaultInstanceForTypeMethod = + protobufMessageLiteClass.getMethod("getDefaultInstanceForType"); + Object prototype1 = + getDefaultInstanceForTypeMethod.invoke(ObjectUtil.checkNotNull(prototype, "prototype")); // parser = prototype.getParserForType() parser = getParserForTypeMethod.invoke(prototype1); - parseFromMethod = parser.getClass().getMethod( - "parseFrom", byte[].class, int.class, int.class); + parseFromMethod = + parser.getClass().getMethod("parseFrom", byte[].class, int.class, int.class); // builder = prototype.newBuilderForType(); builder = newBuilderForTypeMethod.invoke(prototype1); - mergeFromMethod = builder.getClass().getMethod( - "mergeFrom", byte[].class, int.class, int.class); + mergeFromMethod = + builder.getClass().getMethod("mergeFrom", byte[].class, int.class, int.class); // All protobuf message builders inherits from MessageLite.Builder buildMethod = protobufMessageLiteBuilderClass.getDeclaredMethod("build"); @@ -88,8 +83,7 @@ public ProtobufDecoder(Object prototype) { } } - protected void decode( - ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf msg, List out) throws Exception { int length = msg.readableBytes(); byte[] array; int offset; @@ -122,8 +116,8 @@ protected void decode( try { protobufMessageLiteClass = Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite"); - protobufMessageLiteBuilderClass = Class.forName( - "org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); + protobufMessageLiteBuilderClass = + Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite$Builder"); LOG.debug("Hadoop 3.3 and above shades protobuf."); } catch (ClassNotFoundException e) { LOG.debug("Hadoop 3.2 and below use unshaded protobuf.", e); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java index 2f652440e38e..d5dbfb02abc2 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/SendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
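/*
 * The relocation probe used by ProtobufDecoder above, isolated as a sketch in plain JDK
 * reflection: prefer the hadoop-thirdparty (Hadoop 3.3.0+) shaded MessageLite and fall back to
 * the unshaded com.google.protobuf class on older Hadoop.
 */
static Class<?> resolveMessageLite() {
  try {
    // Hadoop 3.3.0 and above relocate protobuf into hadoop-thirdparty.
    return Class.forName("org.apache.hadoop.thirdparty.protobuf.MessageLite");
  } catch (ClassNotFoundException e) {
    try {
      // Hadoop 3.2 and below still expose unshaded protobuf.
      return Class.forName("com.google.protobuf.MessageLite");
    } catch (ClassNotFoundException unshadedMissing) {
      throw new IllegalStateException("No protobuf MessageLite class on the classpath",
        unshadedMissing);
    }
  }
}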
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java index c7cc1fcfcb4b..4f5f05d94276 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/WrapperAsyncFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; - import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.io.ByteArrayOutputStream; @@ -95,8 +94,8 @@ private void flush0(CompletableFuture future, ByteArrayOutputStream buffer } long pos = out.getPos(); /** - * This flush0 method could only be called by single thread, so here we could - * safely overwrite without any synchronization. + * This flush0 method could only be called by single thread, so here we could safely overwrite + * without any synchronization. */ this.syncedLength = pos; future.complete(pos); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java index 80748cad609a..7006fdf6409e 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/ExcludeDatanodeManager.java @@ -42,32 +42,31 @@ public class ExcludeDatanodeManager implements ConfigurationObserver { * Configure for the max count the excluded datanodes. 
*/ public static final String WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY = - "hbase.regionserver.async.wal.max.exclude.datanode.count"; + "hbase.regionserver.async.wal.max.exclude.datanode.count"; public static final int DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT = 3; /** * Configure for the TTL time of the datanodes excluded */ public static final String WAL_EXCLUDE_DATANODE_TTL_KEY = - "hbase.regionserver.async.wal.exclude.datanode.info.ttl.hour"; + "hbase.regionserver.async.wal.exclude.datanode.info.ttl.hour"; public static final int DEFAULT_WAL_EXCLUDE_DATANODE_TTL = 6; // 6 hours private volatile Cache excludeDNsCache; private final int maxExcludeDNCount; private final Configuration conf; // This is a map of providerId->StreamSlowMonitor - private final Map streamSlowMonitors = - new ConcurrentHashMap<>(1); + private final Map streamSlowMonitors = new ConcurrentHashMap<>(1); public ExcludeDatanodeManager(Configuration conf) { this.conf = conf; this.maxExcludeDNCount = conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT); this.excludeDNsCache = CacheBuilder.newBuilder() - .expireAfterWrite(this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) - .maximumSize(this.maxExcludeDNCount) - .build(); + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.maxExcludeDNCount).build(); } /** @@ -85,15 +84,15 @@ public boolean tryAddExcludeDN(DatanodeInfo datanodeInfo, String cause) { datanodeInfo, cause, excludeDNsCache.size()); return true; } - LOG.debug("Try add datanode {} to exclude cache by [{}] failed, " - + "current exclude DNs are {}", datanodeInfo, cause, getExcludeDNs().keySet()); + LOG.debug( + "Try add datanode {} to exclude cache by [{}] failed, " + "current exclude DNs are {}", + datanodeInfo, cause, getExcludeDNs().keySet()); return false; } public StreamSlowMonitor getStreamSlowMonitor(String name) { String key = name == null || name.isEmpty() ? 
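/*
 * A configuration sketch for the exclude cache above; the two key names come from this class,
 * the values are illustrative rather than recommendations.
 */
Configuration conf = HBaseConfiguration.create();
// Cap the number of datanodes that may be excluded at once (default 3).
conf.setInt("hbase.regionserver.async.wal.max.exclude.datanode.count", 5);
// Let excluded datanodes age out of the cache after 2 hours (default 6).
conf.setLong("hbase.regionserver.async.wal.exclude.datanode.info.ttl.hour", 2);
ExcludeDatanodeManager manager = new ExcludeDatanodeManager(conf);
StreamSlowMonitor monitor = manager.getStreamSlowMonitor("wal-monitor");
// Datanodes the monitor judges slow show up in manager.getExcludeDNs() until the TTL expires.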
"defaultMonitorName" : name; - return streamSlowMonitors - .computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); + return streamSlowMonitors.computeIfAbsent(key, k -> new StreamSlowMonitor(conf, key, this)); } public Map getExcludeDNs() { @@ -105,10 +104,12 @@ public void onConfigurationChange(Configuration conf) { for (StreamSlowMonitor monitor : streamSlowMonitors.values()) { monitor.onConfigurationChange(conf); } - this.excludeDNsCache = CacheBuilder.newBuilder().expireAfterWrite( - this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), - TimeUnit.HOURS).maximumSize(this.conf - .getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) - .build(); + this.excludeDNsCache = CacheBuilder.newBuilder() + .expireAfterWrite( + this.conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .maximumSize(this.conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, + DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) + .build(); } } diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java index 7ee04f8eebd2..01755353095e 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/io/asyncfs/monitor/StreamSlowMonitor.java @@ -38,47 +38,44 @@ import org.apache.hbase.thirdparty.com.google.common.cache.LoadingCache; /** - * Class for monitor the wal file flush performance. - * Each active wal file has a StreamSlowMonitor. + * Class for monitor the wal file flush performance. Each active wal file has a StreamSlowMonitor. */ @InterfaceAudience.Private public class StreamSlowMonitor implements ConfigurationObserver { private static final Logger LOG = LoggerFactory.getLogger(StreamSlowMonitor.class); /** - * Configure for the min count for a datanode detected slow. - * If a datanode is detected slow times up to this count, then it will be added to the exclude - * datanode cache by {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} - * of this regionsever. + * Configure for the min count for a datanode detected slow. If a datanode is detected slow times + * up to this count, then it will be added to the exclude datanode cache by + * {@link ExcludeDatanodeManager#tryAddExcludeDN(DatanodeInfo, String)} of this regionsever. */ private static final String WAL_SLOW_DETECT_MIN_COUNT_KEY = - "hbase.regionserver.async.wal.min.slow.detect.count"; + "hbase.regionserver.async.wal.min.slow.detect.count"; private static final int DEFAULT_WAL_SLOW_DETECT_MIN_COUNT = 3; /** * Configure for the TTL of the data that a datanode detected slow. */ private static final String WAL_SLOW_DETECT_DATA_TTL_KEY = - "hbase.regionserver.async.wal.slow.detect.data.ttl.ms"; + "hbase.regionserver.async.wal.slow.detect.data.ttl.ms"; private static final long DEFAULT_WAL_SLOW_DETECT_DATA_TTL = 10 * 60 * 1000; // 10min in ms /** - * Configure for the speed check of packet min length. - * For packets whose data length smaller than this value, check slow by processing time. - * While for packets whose data length larger than this value, check slow by flushing speed. + * Configure for the speed check of packet min length. For packets whose data length smaller than + * this value, check slow by processing time. 
While for packets whose data length larger than this + * value, check slow by flushing speed. */ private static final String DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY = - "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; - private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = - 64 * 1024; //64KB + "hbase.regionserver.async.wal.datanode.slow.check.speed.packet.data.length.min"; + private static final long DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH = 64 * 1024; // 64KB /** - * Configure for the slow packet process time, a duration from send to ACK. - * The processing time check is for packets that data length smaller than + * Configure for the slow packet process time, a duration from send to ACK. The processing time + * check is for packets that data length smaller than * {@link StreamSlowMonitor#DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY} */ public static final String DATANODE_SLOW_PACKET_PROCESS_TIME_KEY = - "hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis"; + "hbase.regionserver.async.wal.datanode.slow.packet.process.time.millis"; private static final long DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME = 6000; // 6s in ms /** @@ -88,7 +85,7 @@ public class StreamSlowMonitor implements ConfigurationObserver { * 64KB should be processed in less than 3.2s. */ private static final String DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY = - "hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs"; + "hbase.regionserver.async.wal.datanode.slow.packet.speed.min.kbs"; private static final double DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED = 20; // 20KB/s private final String name; @@ -108,16 +105,17 @@ public StreamSlowMonitor(Configuration conf, String name, this.name = name; this.excludeDatanodeManager = excludeDatanodeManager; this.datanodeSlowDataQueue = CacheBuilder.newBuilder() - .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, - DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) - .expireAfterWrite(conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, - DEFAULT_WAL_EXCLUDE_DATANODE_TTL), TimeUnit.HOURS) - .build(new CacheLoader>() { - @Override - public Deque load(DatanodeInfo key) throws Exception { - return new ConcurrentLinkedDeque<>(); - } - }); + .maximumSize(conf.getInt(WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT_KEY, + DEFAULT_WAL_MAX_EXCLUDE_SLOW_DATANODE_COUNT)) + .expireAfterWrite( + conf.getLong(WAL_EXCLUDE_DATANODE_TTL_KEY, DEFAULT_WAL_EXCLUDE_DATANODE_TTL), + TimeUnit.HOURS) + .build(new CacheLoader>() { + @Override + public Deque load(DatanodeInfo key) throws Exception { + return new ConcurrentLinkedDeque<>(); + } + }); LOG.info("New stream slow monitor {}", this.name); } @@ -140,17 +138,18 @@ public void checkProcessTimeAndSpeed(DatanodeInfo datanodeInfo, long packetDataL // 1. For small packet, we just have a simple time limit, without considering // the size of the packet. // 2. For large packet, we will calculate the speed, and check if the speed is too slow. 
- boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) || ( - packetDataLen > minLengthForSpeedCheck - && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); + boolean slow = (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs) + || (packetDataLen > minLengthForSpeedCheck + && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs); if (slow) { // Check if large diff ack timestamp between replicas, // should try to avoid misjudgments that caused by GC STW. - if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) || ( - lastAckTimestamp <= 0 && unfinished == 0)) { - LOG.info("Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " - + "lastAckTimestamp={}, monitor name: {}", datanodeInfo, packetDataLen, processTimeMs, - unfinished, lastAckTimestamp, this.name); + if ((lastAckTimestamp > 0 && current - lastAckTimestamp > slowPacketAckMs / 2) + || (lastAckTimestamp <= 0 && unfinished == 0)) { + LOG.info( + "Slow datanode: {}, data length={}, duration={}ms, unfinishedReplicas={}, " + + "lastAckTimestamp={}, monitor name: {}", + datanodeInfo, packetDataLen, processTimeMs, unfinished, lastAckTimestamp, this.name); if (addSlowAckData(datanodeInfo, packetDataLen, processTimeMs)) { excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "slow packet ack"); } @@ -167,7 +166,7 @@ private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long Deque slowDNQueue = datanodeSlowDataQueue.getUnchecked(datanodeInfo); long current = EnvironmentEdgeManager.currentTime(); while (!slowDNQueue.isEmpty() && (current - slowDNQueue.getFirst().getTimestamp() > slowDataTtl - || slowDNQueue.size() >= minSlowDetectCount)) { + || slowDNQueue.size() >= minSlowDetectCount)) { slowDNQueue.removeFirst(); } slowDNQueue.addLast(new PacketAckData(dataLength, processTime)); @@ -175,14 +174,14 @@ private boolean addSlowAckData(DatanodeInfo datanodeInfo, long dataLength, long } private void setConf(Configuration conf) { - this.minSlowDetectCount = conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, - DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); + this.minSlowDetectCount = + conf.getInt(WAL_SLOW_DETECT_MIN_COUNT_KEY, DEFAULT_WAL_SLOW_DETECT_MIN_COUNT); this.slowDataTtl = conf.getLong(WAL_SLOW_DETECT_DATA_TTL_KEY, DEFAULT_WAL_SLOW_DETECT_DATA_TTL); this.slowPacketAckMs = conf.getLong(DATANODE_SLOW_PACKET_PROCESS_TIME_KEY, - DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); - this.minLengthForSpeedCheck = conf.getLong( - DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, - DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); + DEFAULT_DATANODE_SLOW_PACKET_PROCESS_TIME); + this.minLengthForSpeedCheck = + conf.getLong(DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH_KEY, + DEFAULT_DATANODE_PACKET_FLUSH_CHECK_SPEED_MIN_DATA_LENGTH); this.minPacketFlushSpeedKBs = conf.getDouble(DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED_KEY, DEFAULT_DATANODE_SLOW_PACKET_FLUSH_MIN_SPEED); } diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java index 91c003cb6dd0..0f80f874a319 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/CancelableProgressable.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
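/*
 * The slow-packet predicate above, restated as a self-contained sketch with this class's default
 * thresholds. Worked example: the tests below report a 100000-byte packet that took 5100 ms,
 * i.e. roughly 100000 / 5100 ≈ 19.6 KB/s, just under the 20 KB/s floor, so it is flagged slow;
 * a 5000-byte packet taking 7000 ms trips the 6000 ms small-packet limit instead.
 */
static boolean isSlow(long packetDataLen, long processTimeMs) {
  long minLengthForSpeedCheck = 64 * 1024; // speed check applies above 64 KB
  long slowPacketAckMs = 6000;             // small packets must be acked within 6 s
  double minPacketFlushSpeedKBs = 20;      // large packets must sustain at least 20 KB/s
  return (packetDataLen <= minLengthForSpeedCheck && processTimeMs > slowPacketAckMs)
    || (packetDataLen > minLengthForSpeedCheck
      && (double) packetDataLen / processTimeMs < minPacketFlushSpeedKBs);
}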
See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns - * a boolean to support canceling the operation. + * Similar interface as {@link org.apache.hadoop.util.Progressable} but returns a boolean to support + * canceling the operation. *
<p/>
* Used for doing updating of OPENING znode during log replay on region open. */ @@ -30,8 +29,8 @@ public interface CancelableProgressable { /** - * Report progress. Returns true if operations should continue, false if the - * operation should be canceled and rolled back. + * Report progress. Returns true if operations should continue, false if the operation should be + * canceled and rolled back. * @return whether to continue (true) or cancel (false) the operation */ boolean progress(); diff --git a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java index 9c3da1658c70..5a2d72ddc958 100644 --- a/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/main/java/org/apache/hadoop/hbase/util/RecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ private RecoverLeaseFSUtils() { } public static void recoverFileLease(FileSystem fs, Path p, Configuration conf) - throws IOException { + throws IOException { recoverFileLease(fs, p, conf, null); } @@ -51,7 +51,7 @@ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf) * Recover the lease from HDFS, retrying multiple times. */ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf, - CancelableProgressable reporter) throws IOException { + CancelableProgressable reporter) throws IOException { if (fs instanceof FilterFileSystem) { fs = ((FilterFileSystem) fs).getRawFileSystem(); } @@ -82,7 +82,7 @@ public static void recoverFileLease(FileSystem fs, Path p, Configuration conf, * second and we might be able to exit early. */ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p, - final Configuration conf, final CancelableProgressable reporter) throws IOException { + final Configuration conf, final CancelableProgressable reporter) throws IOException { LOG.info("Recover lease on dfs file " + p); long startWaiting = EnvironmentEdgeManager.currentTime(); // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS @@ -120,13 +120,13 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina // Cycle here until (subsequentPause * nbAttempt) elapses. While spinning, check // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though. 
long localStartWaiting = EnvironmentEdgeManager.currentTime(); - while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase * - nbAttempt) { + while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) < subsequentPauseBase + * nbAttempt) { Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000)); if (findIsFileClosedMeth) { try { isFileClosedMeth = - dfs.getClass().getMethod("isFileClosed", new Class[] { Path.class }); + dfs.getClass().getMethod("isFileClosed", new Class[] { Path.class }); } catch (NoSuchMethodException nsme) { LOG.debug("isFileClosed not available"); } finally { @@ -150,12 +150,12 @@ private static boolean recoverDFSFileLease(final DistributedFileSystem dfs, fina } private static boolean checkIfTimedout(final Configuration conf, final long recoveryTimeout, - final int nbAttempt, final Path p, final long startWaiting) { + final int nbAttempt, final Path p, final long startWaiting) { if (recoveryTimeout < EnvironmentEdgeManager.currentTime()) { - LOG.warn("Cannot recoverLease after trying for " + - conf.getInt("hbase.lease.recovery.timeout", 900000) + - "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.warn("Cannot recoverLease after trying for " + + conf.getInt("hbase.lease.recovery.timeout", 900000) + + "ms (hbase.lease.recovery.timeout); continuing, but may be DATALOSS!!!; " + + getLogMessageDetail(nbAttempt, p, startWaiting)); return true; } return false; @@ -166,12 +166,12 @@ private static boolean checkIfTimedout(final Configuration conf, final long reco * @return True if dfs#recoverLease came by true. */ private static boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, - final Path p, final long startWaiting) throws FileNotFoundException { + final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); - LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + - getLogMessageDetail(nbAttempt, p, startWaiting)); + LOG.info((recovered ? "Recovered lease, " : "Failed to recover lease, ") + + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { // This exception comes out instead of FNFE, fix it @@ -188,9 +188,9 @@ private static boolean recoverLease(final DistributedFileSystem dfs, final int n * @return Detail to append to any log message around lease recovering. */ private static String getLogMessageDetail(final int nbAttempt, final Path p, - final long startWaiting) { - return "attempt=" + nbAttempt + " on file=" + p + " after " + - (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; + final long startWaiting) { + return "attempt=" + nbAttempt + " on file=" + p + " after " + + (EnvironmentEdgeManager.currentTime() - startWaiting) + "ms"; } /** @@ -198,7 +198,7 @@ private static String getLogMessageDetail(final int nbAttempt, final Path p, * @return True if file is closed. 
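/*
 * A usage sketch for the lease-recovery utility above. The two timing keys and their defaults
 * are the ones this class reads; the path is illustrative. Returning false from the reporter
 * cancels the wait (checkIfCancelled is declared to throw InterruptedIOException).
 */
Configuration conf = new Configuration();
conf.setInt("hbase.lease.recovery.timeout", 900000); // overall recovery budget in ms (default)
conf.setInt("hbase.lease.recovery.pause", 1000);     // pause between retries in ms (default)
FileSystem fs = FileSystem.get(conf);
Path walFile = new Path("/example/wal-to-recover");
RecoverLeaseFSUtils.recoverFileLease(fs, walFile, conf, () -> {
  // CancelableProgressable: keep reporting progress; return false to abort the recovery loop.
  return true;
});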
*/ private static boolean isFileClosed(final DistributedFileSystem dfs, final Method m, - final Path p) { + final Path p) { try { return (Boolean) m.invoke(dfs, p); } catch (SecurityException e) { @@ -210,7 +210,7 @@ private static boolean isFileClosed(final DistributedFileSystem dfs, final Metho } private static void checkIfCancelled(final CancelableProgressable reporter) - throws InterruptedIOException { + throws InterruptedIOException { if (reporter == null) { return; } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java index e1bc83ca684c..444fcb5c8e97 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/AsyncFSTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ protected static void setupClusterTestDir() { // Using randomUUID ensures that multiple clusters can be launched by // a same test, if it stops & starts them Path testDir = - UTIL.getDataTestDir("cluster_" + HBaseCommonTestingUtility.getRandomUUID().toString()); + UTIL.getDataTestDir("cluster_" + HBaseCommonTestingUtility.getRandomUUID().toString()); CLUSTER_TEST_DIR = new File(testDir.toString()).getAbsoluteFile(); // Have it cleaned up on exit boolean b = deleteOnExit(); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java index a3da52ef335f..cdf09dda52b9 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestExcludeDatanodeManager.java @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -35,28 +36,24 @@ public class TestExcludeDatanodeManager { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestExcludeDatanodeManager.class); + HBaseClassTestRule.forClass(TestExcludeDatanodeManager.class); @Test public void testExcludeSlowDNBySpeed() { Configuration conf = HBaseConfiguration.create(); ExcludeDatanodeManager excludeDatanodeManager = new ExcludeDatanodeManager(conf); StreamSlowMonitor streamSlowDNsMonitor = - excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); + excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - System.currentTimeMillis() - 5100, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, - 
System.currentTimeMillis() - 5100, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 100000, 5100, + System.currentTimeMillis() - 5100, 0); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } @@ -66,21 +63,17 @@ public void testExcludeSlowDNByProcessTime() { Configuration conf = HBaseConfiguration.create(); ExcludeDatanodeManager excludeDatanodeManager = new ExcludeDatanodeManager(conf); StreamSlowMonitor streamSlowDNsMonitor = - excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); + excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - DatanodeInfo datanodeInfo = - new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0").setHostName("hostname1") - .setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222).setInfoSecurePort(333) - .setIpcPort(444).setNetworkLocation("location1").build(); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); - streamSlowDNsMonitor - .checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, - System.currentTimeMillis() - 7000, 0); + DatanodeInfo datanodeInfo = new DatanodeInfo.DatanodeInfoBuilder().setIpAddr("0.0.0.0") + .setHostName("hostname1").setDatanodeUuid("uuid1").setXferPort(111).setInfoPort(222) + .setInfoSecurePort(333).setIpcPort(444).setNetworkLocation("location1").build(); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); + streamSlowDNsMonitor.checkProcessTimeAndSpeed(datanodeInfo, 5000, 7000, + System.currentTimeMillis() - 7000, 0); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); assertTrue(excludeDatanodeManager.getExcludeDNs().containsKey(datanodeInfo)); } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java index d363282921c9..0cd246b27a7d 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,6 +57,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -68,7 +69,7 @@ public class TestFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class); + HBaseClassTestRule.forClass(TestFanOutOneBlockAsyncDFSOutput.class); private static final Logger LOG = LoggerFactory.getLogger(TestFanOutOneBlockAsyncDFSOutput.class); private static DistributedFileSystem FS; @@ -102,7 +103,7 @@ public static void tearDown() throws IOException, InterruptedException { private static final Random RNG = new Random(); // This test depends on Random#setSeed static void writeAndVerify(FileSystem fs, Path f, AsyncFSOutput out) - throws IOException, InterruptedException, ExecutionException { + throws IOException, InterruptedException, ExecutionException { List> futures = new ArrayList<>(); byte[] b = new byte[10]; // test pipelined flush @@ -199,12 +200,12 @@ public void testCreateParentFailed() throws IOException { @Test public void testConnectToDatanodeFailed() - throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, - InvocationTargetException, InterruptedException, NoSuchFieldException { + throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, + InvocationTargetException, InterruptedException, NoSuchFieldException { Field xceiverServerDaemonField = DataNode.class.getDeclaredField("dataXceiverServer"); xceiverServerDaemonField.setAccessible(true); Class xceiverServerClass = - Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); + Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); Method numPeersMethod = xceiverServerClass.getDeclaredMethod("getNumPeers"); numPeersMethod.setAccessible(true); // make one datanode broken @@ -223,12 +224,12 @@ public void testConnectToDatanodeFailed() @Test public void testExcludeFailedConnectToDatanode() - throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, - InvocationTargetException, InterruptedException, NoSuchFieldException { + throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, + InvocationTargetException, InterruptedException, NoSuchFieldException { Field xceiverServerDaemonField = DataNode.class.getDeclaredField("dataXceiverServer"); xceiverServerDaemonField.setAccessible(true); Class xceiverServerClass = - Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); + Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); Method numPeersMethod = xceiverServerClass.getDeclaredMethod("getNumPeers"); numPeersMethod.setAccessible(true); // make one datanode broken @@ -236,13 +237,13 @@ public void testExcludeFailedConnectToDatanode() Path f = new Path("/test"); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); ExcludeDatanodeManager excludeDatanodeManager = - new ExcludeDatanodeManager(HBaseConfiguration.create()); + new ExcludeDatanodeManager(HBaseConfiguration.create()); StreamSlowMonitor streamSlowDNsMonitor = - excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); + 
excludeDatanodeManager.getStreamSlowMonitor("testMonitor"); assertEquals(0, excludeDatanodeManager.getExcludeDNs().size()); - try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, - f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, - CHANNEL_CLASS, streamSlowDNsMonitor)) { + try (FanOutOneBlockAsyncDFSOutput output = + FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, + FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS, streamSlowDNsMonitor)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); assertEquals(1, excludeDatanodeManager.getExcludeDNs().size()); diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java index 8ee838449e14..301fefd185b7 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestFanOutOneBlockAsyncDFSOutputHang.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,6 +47,7 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java index 66735a3fc8e8..41df455e78ef 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestLocalAsyncOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ public class TestLocalAsyncOutput { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLocalAsyncOutput.class); + HBaseClassTestRule.forClass(TestLocalAsyncOutput.class); private static EventLoopGroup GROUP = new NioEventLoopGroup(); @@ -61,7 +61,7 @@ public static void tearDownAfterClass() throws IOException { @Test public void test() throws IOException, InterruptedException, ExecutionException, - CommonFSUtils.StreamLacksCapabilityException { + CommonFSUtils.StreamLacksCapabilityException { Path f = new Path(TEST_UTIL.getDataTestDir(), "test"); FileSystem fs = FileSystem.getLocal(TEST_UTIL.getConfiguration()); AsyncFSOutput out = AsyncFSOutputHelper.createOutput(fs, f, false, true, diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java index 592598c8bb44..84ebe3786770 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestOverwriteFileUnderConstruction.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestOverwriteFileUnderConstruction extends AsyncFSTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestOverwriteFileUnderConstruction.class); + HBaseClassTestRule.forClass(TestOverwriteFileUnderConstruction.class); private static FileSystem FS; diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java index ab23b741b26d..873b589a90a2 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSaslFanOutOneBlockAsyncDFSOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_ENCRYPTION_ALGORITHM_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY; + import java.io.File; import java.io.IOException; import java.lang.reflect.Method; @@ -62,6 +63,7 @@ import org.junit.runners.Parameterized.Parameters; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.io.netty.channel.Channel; import org.apache.hbase.thirdparty.io.netty.channel.EventLoop; import org.apache.hbase.thirdparty.io.netty.channel.EventLoopGroup; @@ -73,11 +75,11 @@ public class TestSaslFanOutOneBlockAsyncDFSOutput extends AsyncFSTestBase { private static final Logger LOG = - LoggerFactory.getLogger(TestSaslFanOutOneBlockAsyncDFSOutput.class); + LoggerFactory.getLogger(TestSaslFanOutOneBlockAsyncDFSOutput.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestSaslFanOutOneBlockAsyncDFSOutput.class); + HBaseClassTestRule.forClass(TestSaslFanOutOneBlockAsyncDFSOutput.class); private static DistributedFileSystem FS; @@ -130,7 +132,7 @@ public static Iterable data() { private static void setUpKeyProvider(Configuration conf) throws Exception { URI keyProviderUri = - new URI("jceks://file" + UTIL.getDataTestDir("test.jks").toUri().toString()); + new URI("jceks://file" + UTIL.getDataTestDir("test.jks").toUri().toString()); conf.set("dfs.encryption.key.provider.uri", keyProviderUri.toString()); KeyProvider keyProvider = KeyProviderFactory.get(keyProviderUri, conf); keyProvider.createKey(TEST_KEY_NAME, KeyProvider.options(conf)); @@ -208,7 +210,7 @@ public static void tearDownAfterClass() throws IOException, InterruptedException private void createEncryptionZone() throws Exception { Method method = - DistributedFileSystem.class.getMethod("createEncryptionZone", Path.class, String.class); + DistributedFileSystem.class.getMethod("createEncryptionZone", Path.class, String.class); method.invoke(FS, entryptionTestDirOnTestFs, TEST_KEY_NAME); } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java index 55ef0b72b527..f9ea781d11da 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/io/asyncfs/TestSendBufSizePredictor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java index 5d0b2ebfff33..d12ff22722c4 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/security/HBaseKerberosUtils.java @@ -115,7 +115,7 @@ public static Configuration getSecuredConfiguration() { * @param spnegoPrincipal SPNEGO principal used by NN web UI. 
*/ public static void setSecuredConfiguration(Configuration conf, String servicePrincipal, - String spnegoPrincipal) { + String spnegoPrincipal) { setPrincipalForTesting(servicePrincipal); setSecuredConfiguration(conf); setSecuredHadoopConfiguration(conf, spnegoPrincipal); @@ -131,7 +131,7 @@ public static void setSecuredConfiguration(Configuration conf) { } private static void setSecuredHadoopConfiguration(Configuration conf, - String spnegoServerPrincipal) { + String spnegoServerPrincipal) { String serverPrincipal = System.getProperty(KRB_PRINCIPAL); String keytabFilePath = System.getProperty(KRB_KEYTAB_FILE); // HDFS @@ -160,7 +160,7 @@ private static void setSecuredHadoopConfiguration(Configuration conf, * @throws Exception if unable to set up SSL configuration */ public static void setSSLConfiguration(HBaseCommonTestingUtility utility, Class clazz) - throws Exception { + throws Exception { Configuration conf = utility.getConfiguration(); conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); @@ -173,19 +173,19 @@ public static void setSSLConfiguration(HBaseCommonTestingUtility utility, Class< } public static UserGroupInformation loginAndReturnUGI(Configuration conf, String username) - throws IOException { + throws IOException { String hostname = InetAddress.getLocalHost().getHostName(); String keyTabFileConfKey = "hbase." + username + ".keytab.file"; String keyTabFileLocation = conf.get(keyTabFileConfKey); String principalConfKey = "hbase." + username + ".kerberos.principal"; String principal = org.apache.hadoop.security.SecurityUtil - .getServerPrincipal(conf.get(principalConfKey), hostname); + .getServerPrincipal(conf.get(principalConfKey), hostname); if (keyTabFileLocation == null || principal == null) { LOG.warn( "Principal or key tab file null for : " + principalConfKey + ", " + keyTabFileConfKey); } UserGroupInformation ugi = - UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation); + UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal, keyTabFileLocation); return ugi; } } diff --git a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java index 3931dfd5ba2c..3f99338ac4a3 100644 --- a/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java +++ b/hbase-asyncfs/src/test/java/org/apache/hadoop/hbase/util/TestRecoverLeaseFSUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -41,7 +40,7 @@ public class TestRecoverLeaseFSUtils { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecoverLeaseFSUtils.class); + HBaseClassTestRule.forClass(TestRecoverLeaseFSUtils.class); private static final HBaseCommonTestingUtility HTU = new HBaseCommonTestingUtility(); static { @@ -64,13 +63,13 @@ public void testRecoverLease() throws IOException { DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class); // Fail four times and pass on the fifth. 
Mockito.when(dfs.recoverLease(FILE)).thenReturn(false).thenReturn(false).thenReturn(false) - .thenReturn(false).thenReturn(true); + .thenReturn(false).thenReturn(true); RecoverLeaseFSUtils.recoverFileLease(dfs, FILE, HTU.getConfiguration(), reporter); Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); // Make sure we waited at least hbase.lease.recovery.dfs.timeout * 3 (the first two // invocations will happen pretty fast... the we fall into the longer wait loop). - assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 * - HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); + assertTrue((EnvironmentEdgeManager.currentTime() - startTime) > (3 + * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); } /** diff --git a/hbase-build-configuration/pom.xml b/hbase-build-configuration/pom.xml index dd5002ff4589..49437d96f492 100644 --- a/hbase-build-configuration/pom.xml +++ b/hbase-build-configuration/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase org.apache.hbase + hbase 2.5.0-SNAPSHOT .. hbase-build-configuration - Apache HBase - Build Configuration - Configure the build-support artifacts for maven build pom + Apache HBase - Build Configuration + Configure the build-support artifacts for maven build + + + org.apache.hbase + hbase-annotations + test-jar + test + + + org.apache.yetus + audience-annotations + + @@ -50,18 +62,6 @@ - - - org.apache.hbase - hbase-annotations - test-jar - test - - - org.apache.yetus - audience-annotations - - diff --git a/hbase-checkstyle/pom.xml b/hbase-checkstyle/pom.xml index 6771dc8ebb4a..1618e8f13d36 100644 --- a/hbase-checkstyle/pom.xml +++ b/hbase-checkstyle/pom.xml @@ -1,7 +1,5 @@ - + -4.0.0 -org.apache.hbase -hbase-checkstyle -2.5.0-SNAPSHOT -Apache HBase - Checkstyle -Module to hold Checkstyle properties for HBase. - + 4.0.0 + - hbase org.apache.hbase + hbase 2.5.0-SNAPSHOT .. + org.apache.hbase + hbase-checkstyle + 2.5.0-SNAPSHOT + Apache HBase - Checkstyle + Module to hold Checkstyle properties for HBase. - - + + - - org.apache.maven.plugins - maven-site-plugin - - true - - - - - maven-assembly-plugin - - true - - - - + + org.apache.maven.plugins + maven-site-plugin + + true + + + + + maven-assembly-plugin + + true + + + + diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml index 93432a213b1f..e2f9d2ed10d6 100644 --- a/hbase-client/pom.xml +++ b/hbase-client/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,28 +30,6 @@ hbase-client Apache HBase - Client Client of HBase - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - @@ -221,6 +198,28 @@ + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -242,8 +241,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -398,8 +398,7 @@ lifecycle-mapping - - + diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java index b137a7da2ceb..23258f0faf67 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Abortable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,8 +22,8 @@ /** * Interface to support the aborting of a given server or client. *

<p>
- * This is used primarily for ZooKeeper usage when we could get an unexpected
- * and fatal exception, requiring an abort.
+ * This is used primarily for ZooKeeper usage when we could get an unexpected and fatal exception,
+ * requiring an abort.
 * <p>
* Implemented by the Master, RegionServer, and TableServers (client). */ @@ -38,8 +37,7 @@ public interface Abortable { void abort(String why, Throwable e); /** - * It just call another abort method and the Throwable - * parameter is null. + * It just call another abort method and the Throwable parameter is null. * @param why Why we're aborting. * @see Abortable#abort(String, Throwable) */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java index b1fcd945b7d6..1234361468d5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,13 +63,12 @@ public class AsyncMetaTableAccessor { private static final Logger LOG = LoggerFactory.getLogger(AsyncMetaTableAccessor.class); - /** The delimiter for meta columns for replicaIds > 0 */ private static final char META_REPLICA_ID_DELIMITER = '_'; /** A regex for parsing server columns from meta. See above javadoc for meta layout */ - private static final Pattern SERVER_COLUMN_PATTERN = Pattern - .compile("^server(_[0-9a-fA-F]{4})?$"); + private static final Pattern SERVER_COLUMN_PATTERN = + Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); public static CompletableFuture tableExists(AsyncTable metaTable, TableName tableName) { @@ -106,19 +105,20 @@ public static CompletableFuture> getTableState(AsyncTable> getRegionLocation( - AsyncTable metaTable, byte[] regionName) { + public static CompletableFuture> + getRegionLocation(AsyncTable metaTable, byte[] regionName) { CompletableFuture> future = new CompletableFuture<>(); try { RegionInfo parsedRegionInfo = MetaTableAccessor.parseRegionInfoFromRegionName(regionName); addListener(metaTable.get(new Get(MetaTableAccessor.getMetaKeyForRegion(parsedRegionInfo)) - .addFamily(HConstants.CATALOG_FAMILY)), (r, err) -> { + .addFamily(HConstants.CATALOG_FAMILY)), + (r, err) -> { if (err != null) { future.completeExceptionally(err); return; } future.complete(getRegionLocations(r) - .map(locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId()))); + .map(locations -> locations.getRegionLocation(parsedRegionInfo.getReplicaId()))); }); } catch (IOException parseEx) { LOG.warn("Failed to parse the passed region name: " + Bytes.toStringBinary(regionName)); @@ -133,12 +133,12 @@ public static CompletableFuture> getRegionLocation( * @param encodedRegionName region we're looking for * @return HRegionLocation for the given region */ - public static CompletableFuture> getRegionLocationWithEncodedName( - AsyncTable metaTable, byte[] encodedRegionName) { + public static CompletableFuture> + getRegionLocationWithEncodedName(AsyncTable metaTable, byte[] encodedRegionName) { CompletableFuture> future = new CompletableFuture<>(); addListener( metaTable - .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)), + .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY)), (results, err) -> { if (err != null) { future.completeExceptionally(err); @@ -146,17 +146,17 @@ public static CompletableFuture> getRegionLocationWith } String encodedRegionNameStr = Bytes.toString(encodedRegionName); results.stream().filter(result -> 
!result.isEmpty()) - .filter(result -> MetaTableAccessor.getRegionInfo(result) != null).forEach(result -> { - getRegionLocations(result).ifPresent(locations -> { - for (HRegionLocation location : locations.getRegionLocations()) { - if (location != null && - encodedRegionNameStr.equals(location.getRegion().getEncodedName())) { - future.complete(Optional.of(location)); - return; + .filter(result -> MetaTableAccessor.getRegionInfo(result) != null).forEach(result -> { + getRegionLocations(result).ifPresent(locations -> { + for (HRegionLocation location : locations.getRegionLocations()) { + if (location != null + && encodedRegionNameStr.equals(location.getRegion().getEncodedName())) { + future.complete(Optional.of(location)); + return; + } } - } + }); }); - }); future.complete(Optional.empty()); }); return future; @@ -166,10 +166,9 @@ private static Optional getTableState(Result r) throws IOException { Cell cell = r.getColumnLatestCell(getTableFamily(), getStateColumn()); if (cell == null) return Optional.empty(); try { - return Optional.of(TableState.parseFrom( - TableName.valueOf(r.getRow()), - Arrays.copyOfRange(cell.getValueArray(), cell.getValueOffset(), cell.getValueOffset() - + cell.getValueLength()))); + return Optional.of( + TableState.parseFrom(TableName.valueOf(r.getRow()), Arrays.copyOfRange(cell.getValueArray(), + cell.getValueOffset(), cell.getValueOffset() + cell.getValueLength()))); } catch (DeserializationException e) { throw new IOException("Failed to parse table state from result: " + r, e); } @@ -192,8 +191,8 @@ public static CompletableFuture> getTableHRegionLocations( future.complete(Collections.emptyList()); } else { List regionLocations = - locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond())) - .collect(Collectors.toList()); + locations.stream().map(loc -> new HRegionLocation(loc.getFirst(), loc.getSecond())) + .collect(Collectors.toList()); future.complete(regionLocations); } }); @@ -209,8 +208,8 @@ public static CompletableFuture> getTableHRegionLocations( * {@link CompletableFuture}. */ private static CompletableFuture>> getTableRegionsAndLocations( - final AsyncTable metaTable, - final TableName tableName, final boolean excludeOfflinedSplitParents) { + final AsyncTable metaTable, final TableName tableName, + final boolean excludeOfflinedSplitParents) { CompletableFuture>> future = new CompletableFuture<>(); if (TableName.META_TABLE_NAME.equals(tableName)) { future.completeExceptionally(new IOException( @@ -219,36 +218,36 @@ private static CompletableFuture>> getTableReg // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress CollectingVisitor> visitor = - new CollectingVisitor>() { - private RegionLocations current = null; - - @Override - public boolean visit(Result r) throws IOException { - Optional currentRegionLocations = getRegionLocations(r); - current = currentRegionLocations.orElse(null); - if (current == null || current.getRegionLocation().getRegion() == null) { - LOG.warn("No serialized RegionInfo in " + r); - return true; - } - RegionInfo hri = current.getRegionLocation().getRegion(); - if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - // Else call super and add this Result to the collection. 
- return super.visit(r); - } + new CollectingVisitor>() { + private RegionLocations current = null; + + @Override + public boolean visit(Result r) throws IOException { + Optional currentRegionLocations = getRegionLocations(r); + current = currentRegionLocations.orElse(null); + if (current == null || current.getRegionLocation().getRegion() == null) { + LOG.warn("No serialized RegionInfo in " + r); + return true; + } + RegionInfo hri = current.getRegionLocation().getRegion(); + if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; + // Else call super and add this Result to the collection. + return super.visit(r); + } - @Override - void add(Result r) { - if (current == null) { - return; - } - for (HRegionLocation loc : current.getRegionLocations()) { - if (loc != null) { - this.results.add(new Pair(loc.getRegion(), loc - .getServerName())); + @Override + void add(Result r) { + if (current == null) { + return; + } + for (HRegionLocation loc : current.getRegionLocations()) { + if (loc != null) { + this.results + .add(new Pair(loc.getRegion(), loc.getServerName())); + } + } } - } - } - }; + }; addListener(scanMeta(metaTable, tableName, QueryType.REGION, visitor), (v, error) -> { if (error != null) { @@ -332,7 +331,7 @@ public void onError(Throwable error) { @Override @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NONNULL_PARAM_VIOLATION", - justification = "https://github.com/findbugsproject/findbugs/issues/79") + justification = "https://github.com/findbugsproject/findbugs/issues/79") public void onComplete() { future.complete(null); } @@ -423,9 +422,8 @@ private static Optional getRegionLocations(final Result r) { } /** - * Returns the HRegionLocation parsed from the given meta row Result - * for the given regionInfo and replicaId. The regionInfo can be the default region info - * for the replica. + * Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and + * replicaId. The regionInfo can be the default region info for the replica. * @param r the meta row result * @param regionInfo RegionInfo for default replica * @param replicaId the replicaId for the HRegionLocation @@ -448,8 +446,8 @@ private static Optional getServerName(final Result r, final int repl byte[] serverColumn = getServerColumn(replicaId); Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn); if (cell == null || cell.getValueLength() == 0) return Optional.empty(); - String hostAndPort = Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength()); + String hostAndPort = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn); if (cell == null || cell.getValueLength() == 0) return Optional.empty(); @@ -463,8 +461,8 @@ private static Optional getServerName(final Result r, final int repl } /** - * The latest seqnum that the server writing to meta observed when opening the region. - * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written. + * The latest seqnum that the server writing to meta observed when opening the region. E.g. the + * seqNum when the result of {@link #getServerName(Result, int)} was written. * @param r Result to pull the seqNum from * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written. 
*/ @@ -582,10 +580,9 @@ private static byte[] getStateColumn() { * @return a byte[] for server column qualifier */ private static byte[] getServerColumn(int replicaId) { - return replicaId == 0 - ? HConstants.SERVER_QUALIFIER - : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.SERVER_QUALIFIER + : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -594,10 +591,9 @@ private static byte[] getServerColumn(int replicaId) { * @return a byte[] for server start code column qualifier */ private static byte[] getStartCodeColumn(int replicaId) { - return replicaId == 0 - ? HConstants.STARTCODE_QUALIFIER - : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER + : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -606,15 +602,14 @@ private static byte[] getStartCodeColumn(int replicaId) { * @return a byte[] for seqNum column qualifier */ private static byte[] getSeqNumColumn(int replicaId) { - return replicaId == 0 - ? HConstants.SEQNUM_QUALIFIER - : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.SEQNUM_QUALIFIER + : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** - * Parses the replicaId from the server column qualifier. See top of the class javadoc - * for the actual meta layout + * Parses the replicaId from the server column qualifier. See top of the class javadoc for the + * actual meta layout * @param serverColumn the column qualifier * @return an int for the replicaId */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java index 91cedd60299d..eb56dee1b12d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStats.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.util.Collections; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.yetus.audience.InterfaceAudience; @@ -57,8 +55,7 @@ public int getExceptionCount() { private String getFailedRegions() { return exceptions.keySet().stream() .map(regionName -> RegionInfo.prettyPrint(RegionInfo.encodeRegionName(regionName))) - .collect(Collectors.toList()) - .toString(); + .collect(Collectors.toList()).toString(); } @InterfaceAudience.Private @@ -68,11 +65,8 @@ public static CacheEvictionStatsBuilder builder() { @Override public String toString() { - return "CacheEvictionStats{" + - "evictedBlocks=" + evictedBlocks + - ", maxCacheSize=" + maxCacheSize + - ", failedRegionsSize=" + getExceptionCount() + - ", failedRegions=" + getFailedRegions() + - '}'; + return "CacheEvictionStats{" + "evictedBlocks=" + evictedBlocks + ", maxCacheSize=" + + maxCacheSize + ", failedRegionsSize=" + getExceptionCount() + ", failedRegions=" + + getFailedRegions() + '}'; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java index 85d68dcc08bc..679823338310 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsAggregator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java index d9e1400da16b..4b31d98611bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CacheEvictionStatsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.util.HashMap; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -42,7 +40,7 @@ public CacheEvictionStatsBuilder withMaxCacheSize(long maxCacheSize) { return this; } - public void addException(byte[] regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java index 13ab3ed47cee..e1d3e4c79396 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallDroppedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Returned to the clients when their request was discarded due to server being overloaded. - * Clients should retry upon receiving it. + * Returned to the clients when their request was discarded due to server being overloaded. Clients + * should retry upon receiving it. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java index 12fa242693c8..1a3bc081ed7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CallQueueTooBigException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; @SuppressWarnings("serial") diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java index a63ca6936ec1..1afcb30ece01 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClockOutOfSyncException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,12 +18,10 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server clock skew is - * too high. + * This exception is thrown by the master when a region server clock skew is too high. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 1dd01faf808a..dafdf6e5d5ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -15,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; import java.util.UUID; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterIdProtos; -import org.apache.hadoop.hbase.util.Bytes; /** - * The identifier for this cluster. - * It is serialized to the filesystem and up into zookeeper. This is a container for the id. - * Also knows how to serialize and deserialize the cluster id. + * The identifier for this cluster. It is serialized to the filesystem and up into zookeeper. 
This + * is a container for the id. Also knows how to serialize and deserialize the cluster id. */ @InterfaceAudience.Private public class ClusterId { private final String id; /** - * New ClusterID. Generates a uniqueid. + * New ClusterID. Generates a uniqueid. */ public ClusterId() { this(UUID.randomUUID().toString()); @@ -50,7 +48,7 @@ public ClusterId(final String uuid) { /** * @return The clusterid serialized using pb w/ pb magic prefix */ - public byte [] toByteArray() { + public byte[] toByteArray() { return ProtobufUtil.prependPBMagic(convert().toByteArray()); } @@ -60,7 +58,7 @@ public ClusterId(final String uuid) { * @throws DeserializationException * @see #toByteArray() */ - public static ClusterId parseFrom(final byte [] bytes) throws DeserializationException { + public static ClusterId parseFrom(final byte[] bytes) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes)) { int pblen = ProtobufUtil.lengthOfPBMagic(); ClusterIdProtos.ClusterId.Builder builder = ClusterIdProtos.ClusterId.newBuilder(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java index 29679e6fb6f4..98783be61269 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetrics.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -39,28 +37,32 @@ *

 * <ul>
 * <li>The average cluster load.</li>
 * <li>The number of regions deployed on the cluster.</li>
 * <li>The number of requests since last report.</li>
- * <li>Detailed region server loading and resource usage information,
- * per server and per region.</li>
+ * <li>Detailed region server loading and resource usage information, per server and per
+ * region.</li>
 * <li>Regions in transition at master</li>
 * <li>The unique cluster ID</li>
 * </ul>
- * {@link Option} provides a way to get desired ClusterStatus information.
- * The following codes will get all the cluster information.
+ * {@link Option} provides a way to get desired ClusterStatus information. The following
+ * codes will get all the cluster information.
+ *
 * <pre>
- * {@code
- * // Original version still works
- * Admin admin = connection.getAdmin();
- * ClusterMetrics metrics = admin.getClusterStatus();
- * // or below, a new version which has the same effects
- * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
+ * {
+ *   @code
+ *   // Original version still works
+ *   Admin admin = connection.getAdmin();
+ *   ClusterMetrics metrics = admin.getClusterStatus();
+ *   // or below, a new version which has the same effects
+ *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.allOf(Option.class));
 * }
 * </pre>
- * If information about live servers is the only wanted.
- * then codes in the following way:
+ *
+ * If information about live servers is the only wanted. then codes in the following way:
+ *
 * <pre>
- * {@code
- * Admin admin = connection.getAdmin();
- * ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
+ * {
+ *   @code
+ *   Admin admin = connection.getAdmin();
+ *   ClusterMetrics metrics = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
 * }
 * </pre>
    */ @@ -129,8 +131,7 @@ default long getLastMajorCompactionTimestamp(TableName table) { default long getLastMajorCompactionTimestamp(byte[] regionName) { return getLiveServerMetrics().values().stream() - .filter(s -> s.getRegionMetrics().containsKey(regionName)) - .findAny() + .filter(s -> s.getRegionMetrics().containsKey(regionName)).findAny() .map(s -> s.getRegionMetrics().get(regionName).getLastMajorCompactionTimestamp()) .orElse(0L); } @@ -150,13 +151,12 @@ default double getAverageLoad() { if (serverSize == 0) { return 0; } - return (double)getRegionCount() / (double)serverSize; + return (double) getRegionCount() / (double) serverSize; } /** - * Provide region states count for given table. - * e.g howmany regions of give table are opened/closed/rit etc - * + * Provide region states count for given table. e.g howmany regions of give table are + * opened/closed/rit etc * @return map of table to region states count */ Map getTableRegionStatesCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 011f93f9fe90..308e9dceefae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -26,13 +24,13 @@ import java.util.Map; import java.util.TreeMap; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.RegionState; import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos.Option; @@ -43,49 +41,45 @@ public final class ClusterMetricsBuilder { public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics metrics) { - ClusterStatusProtos.ClusterStatus.Builder builder - = ClusterStatusProtos.ClusterStatus.newBuilder() - .addAllBackupMasters(metrics.getBackupMasterNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) - .addAllDeadServers(metrics.getDeadServerNames().stream() - .map(ProtobufUtil::toServerName).collect(Collectors.toList())) - .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() - .map(s -> ClusterStatusProtos.LiveServerInfo - .newBuilder() - .setServer(ProtobufUtil.toServerName(s.getKey())) - .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())) - .build()) - .collect(Collectors.toList())) - .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) - .collect(Collectors.toList())) - .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() - .map(r -> ClusterStatusProtos.RegionInTransition - .newBuilder() - 
.setSpec(HBaseProtos.RegionSpecifier - .newBuilder() - .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) - .build()) - .setRegionState(r.convert()) - .build()) - .collect(Collectors.toList())) - .setMasterInfoPort(metrics.getMasterInfoPort()) - .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList())) - .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() - .map(status -> - ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) - .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())) - .build()) - .collect(Collectors.toList())); + ClusterStatusProtos.ClusterStatus.Builder builder = + ClusterStatusProtos.ClusterStatus.newBuilder() + .addAllBackupMasters(metrics.getBackupMasterNames().stream() + .map(ProtobufUtil::toServerName).collect(Collectors.toList())) + .addAllDeadServers(metrics.getDeadServerNames().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .addAllLiveServers(metrics.getLiveServerMetrics().entrySet().stream() + .map(s -> ClusterStatusProtos.LiveServerInfo.newBuilder() + .setServer(ProtobufUtil.toServerName(s.getKey())) + .setServerLoad(ServerMetricsBuilder.toServerLoad(s.getValue())).build()) + .collect(Collectors.toList())) + .addAllMasterCoprocessors(metrics.getMasterCoprocessorNames().stream() + .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + .collect(Collectors.toList())) + .addAllRegionsInTransition(metrics.getRegionStatesInTransition().stream() + .map(r -> ClusterStatusProtos.RegionInTransition.newBuilder() + .setSpec(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(r.getRegion().getRegionName())) + .build()) + .setRegionState(r.convert()).build()) + .collect(Collectors.toList())) + .setMasterInfoPort(metrics.getMasterInfoPort()) + .addAllServersName(metrics.getServersName().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList())) + .addAllTableRegionStatesCount( + metrics.getTableRegionStatesCount().entrySet().stream() + .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() + .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setRegionStatesCount( + ProtobufUtil.toTableRegionStatesCount(status.getValue())) + .build()) + .collect(Collectors.toList())); if (metrics.getMasterName() != null) { builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); } if (metrics.getMasterTasks() != null) { builder.addAllMasterTasks(metrics.getMasterTasks().stream() - .map(t -> ProtobufUtil.toServerTask(t)).collect(Collectors.toList())); + .map(t -> ProtobufUtil.toServerTask(t)).collect(Collectors.toList())); } if (metrics.getBalancerOn() != null) { builder.setBalancerOn(metrics.getBalancerOn()); @@ -95,40 +89,33 @@ public static ClusterStatusProtos.ClusterStatus toClusterStatus(ClusterMetrics m } if (metrics.getHBaseVersion() != null) { builder.setHbaseVersion( - FSProtos.HBaseVersionFileContent.newBuilder() - .setVersion(metrics.getHBaseVersion())); + FSProtos.HBaseVersionFileContent.newBuilder().setVersion(metrics.getHBaseVersion())); } return builder.build(); } - public static ClusterMetrics toClusterMetrics( - ClusterStatusProtos.ClusterStatus proto) { + public static ClusterMetrics 
toClusterMetrics(ClusterStatusProtos.ClusterStatus proto) { ClusterMetricsBuilder builder = ClusterMetricsBuilder.newBuilder(); - builder.setLiveServerMetrics(proto.getLiveServersList().stream() - .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), - ServerMetricsBuilder::toServerMetrics))) - .setDeadServerNames(proto.getDeadServersList().stream() - .map(ProtobufUtil::toServerName) + builder + .setLiveServerMetrics(proto.getLiveServersList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toServerName(e.getServer()), + ServerMetricsBuilder::toServerMetrics))) + .setDeadServerNames(proto.getDeadServersList().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) - .setBackerMasterNames(proto.getBackupMastersList().stream() - .map(ProtobufUtil::toServerName) + .setBackerMasterNames(proto.getBackupMastersList().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) .setRegionsInTransition(proto.getRegionsInTransitionList().stream() - .map(ClusterStatusProtos.RegionInTransition::getRegionState) - .map(RegionState::convert) + .map(ClusterStatusProtos.RegionInTransition::getRegionState).map(RegionState::convert) .collect(Collectors.toList())) .setMasterCoprocessorNames(proto.getMasterCoprocessorsList().stream() - .map(HBaseProtos.Coprocessor::getName) - .collect(Collectors.toList())) + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) .setServerNames(proto.getServersNameList().stream().map(ProtobufUtil::toServerName) .collect(Collectors.toList())) - .setTableRegionStatesCount( - proto.getTableRegionStatesCountList().stream() - .collect(Collectors.toMap( - e -> ProtobufUtil.toTableName(e.getTableName()), - e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) - .setMasterTasks(proto.getMasterTasksList().stream() - .map(t -> ProtobufUtil.getServerTask(t)).collect(Collectors.toList())); + .setTableRegionStatesCount(proto.getTableRegionStatesCountList().stream() + .collect(Collectors.toMap(e -> ProtobufUtil.toTableName(e.getTableName()), + e -> ProtobufUtil.toTableRegionStatesCount(e.getRegionStatesCount())))) + .setMasterTasks(proto.getMasterTasksList().stream().map(t -> ProtobufUtil.getServerTask(t)) + .collect(Collectors.toList())); if (proto.hasClusterId()) { builder.setClusterId(ClusterId.convert(proto.getClusterId()).toString()); } @@ -158,21 +145,35 @@ public static ClusterMetrics toClusterMetrics( */ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) { switch (option) { - case HBASE_VERSION: return ClusterMetrics.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterMetrics.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterMetrics.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterMetrics.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterMetrics.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterMetrics.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterMetrics.Option.MASTER; - case BACKUP_MASTERS: return ClusterMetrics.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterMetrics.Option.BALANCER_ON; - case SERVERS_NAME: return ClusterMetrics.Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterMetrics.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterMetrics.Option.TASKS; + case HBASE_VERSION: + return ClusterMetrics.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterMetrics.Option.LIVE_SERVERS; + case 
DEAD_SERVERS: + return ClusterMetrics.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterMetrics.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterMetrics.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterMetrics.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterMetrics.Option.MASTER; + case BACKUP_MASTERS: + return ClusterMetrics.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterMetrics.Option.BALANCER_ON; + case SERVERS_NAME: + return ClusterMetrics.Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterMetrics.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterMetrics.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterMetrics.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -183,21 +184,35 @@ public static ClusterMetrics.Option toOption(ClusterStatusProtos.Option option) */ public static ClusterStatusProtos.Option toOption(ClusterMetrics.Option option) { switch (option) { - case HBASE_VERSION: return ClusterStatusProtos.Option.HBASE_VERSION; - case LIVE_SERVERS: return ClusterStatusProtos.Option.LIVE_SERVERS; - case DEAD_SERVERS: return ClusterStatusProtos.Option.DEAD_SERVERS; - case REGIONS_IN_TRANSITION: return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; - case CLUSTER_ID: return ClusterStatusProtos.Option.CLUSTER_ID; - case MASTER_COPROCESSORS: return ClusterStatusProtos.Option.MASTER_COPROCESSORS; - case MASTER: return ClusterStatusProtos.Option.MASTER; - case BACKUP_MASTERS: return ClusterStatusProtos.Option.BACKUP_MASTERS; - case BALANCER_ON: return ClusterStatusProtos.Option.BALANCER_ON; - case SERVERS_NAME: return Option.SERVERS_NAME; - case MASTER_INFO_PORT: return ClusterStatusProtos.Option.MASTER_INFO_PORT; - case TABLE_TO_REGIONS_COUNT: return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; - case TASKS: return ClusterStatusProtos.Option.TASKS; + case HBASE_VERSION: + return ClusterStatusProtos.Option.HBASE_VERSION; + case LIVE_SERVERS: + return ClusterStatusProtos.Option.LIVE_SERVERS; + case DEAD_SERVERS: + return ClusterStatusProtos.Option.DEAD_SERVERS; + case REGIONS_IN_TRANSITION: + return ClusterStatusProtos.Option.REGIONS_IN_TRANSITION; + case CLUSTER_ID: + return ClusterStatusProtos.Option.CLUSTER_ID; + case MASTER_COPROCESSORS: + return ClusterStatusProtos.Option.MASTER_COPROCESSORS; + case MASTER: + return ClusterStatusProtos.Option.MASTER; + case BACKUP_MASTERS: + return ClusterStatusProtos.Option.BACKUP_MASTERS; + case BALANCER_ON: + return ClusterStatusProtos.Option.BALANCER_ON; + case SERVERS_NAME: + return Option.SERVERS_NAME; + case MASTER_INFO_PORT: + return ClusterStatusProtos.Option.MASTER_INFO_PORT; + case TABLE_TO_REGIONS_COUNT: + return ClusterStatusProtos.Option.TABLE_TO_REGIONS_COUNT; + case TASKS: + return ClusterStatusProtos.Option.TASKS; // should not reach here - default: throw new IllegalArgumentException("Invalid option: " + option); + default: + throw new IllegalArgumentException("Invalid option: " + option); } } @@ -223,6 +238,7 @@ public static List toOptions(EnumSet deadServerNames = Collections.emptyList(); @@ -244,10 +260,12 @@ public static ClusterMetricsBuilder newBuilder() { private ClusterMetricsBuilder() { } + public ClusterMetricsBuilder setHBaseVersion(String value) { this.hbaseVersion = value; return this; } + public ClusterMetricsBuilder setDeadServerNames(List value) { 
this.deadServerNames = value; return this; @@ -262,62 +280,59 @@ public ClusterMetricsBuilder setMasterName(ServerName value) { this.masterName = value; return this; } + public ClusterMetricsBuilder setBackerMasterNames(List value) { this.backupMasterNames = value; return this; } + public ClusterMetricsBuilder setRegionsInTransition(List value) { this.regionsInTransition = value; return this; } + public ClusterMetricsBuilder setClusterId(String value) { this.clusterId = value; return this; } + public ClusterMetricsBuilder setMasterCoprocessorNames(List value) { this.masterCoprocessorNames = value; return this; } + public ClusterMetricsBuilder setBalancerOn(@Nullable Boolean value) { this.balancerOn = value; return this; } + public ClusterMetricsBuilder setMasterInfoPort(int value) { this.masterInfoPort = value; return this; } + public ClusterMetricsBuilder setServerNames(List serversName) { this.serversName = serversName; return this; } + public ClusterMetricsBuilder setMasterTasks(List masterTasks) { this.masterTasks = masterTasks; return this; } - public ClusterMetricsBuilder setTableRegionStatesCount( - Map tableRegionStatesCount) { + public ClusterMetricsBuilder + setTableRegionStatesCount(Map tableRegionStatesCount) { this.tableRegionStatesCount = tableRegionStatesCount; return this; } public ClusterMetrics build() { - return new ClusterMetricsImpl( - hbaseVersion, - deadServerNames, - liveServerMetrics, - masterName, - backupMasterNames, - regionsInTransition, - clusterId, - masterCoprocessorNames, - balancerOn, - masterInfoPort, - serversName, - tableRegionStatesCount, - masterTasks - ); + return new ClusterMetricsImpl(hbaseVersion, deadServerNames, liveServerMetrics, masterName, + backupMasterNames, regionsInTransition, clusterId, masterCoprocessorNames, balancerOn, + masterInfoPort, serversName, tableRegionStatesCount, masterTasks); } + private static class ClusterMetricsImpl implements ClusterMetrics { @Nullable private final String hbaseVersion; @@ -338,16 +353,10 @@ private static class ClusterMetricsImpl implements ClusterMetrics { private final List masterTasks; ClusterMetricsImpl(String hbaseVersion, List deadServerNames, - Map liveServerMetrics, - ServerName masterName, - List backupMasterNames, - List regionsInTransition, - String clusterId, - List masterCoprocessorNames, - Boolean balancerOn, - int masterInfoPort, - List serversName, - Map tableRegionStatesCount, + Map liveServerMetrics, ServerName masterName, + List backupMasterNames, List regionsInTransition, String clusterId, + List masterCoprocessorNames, Boolean balancerOn, int masterInfoPort, + List serversName, Map tableRegionStatesCount, List masterTasks) { this.hbaseVersion = hbaseVersion; this.deadServerNames = Preconditions.checkNotNull(deadServerNames); @@ -437,15 +446,15 @@ public String toString() { int backupMastersSize = getBackupMasterNames().size(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: getBackupMasterNames()) { + for (ServerName serverName : getBackupMasterNames()) { sb.append("\n " + serverName); } } int serversSize = getLiveServerMetrics().size(); int serversNameSize = getServersName().size(); - sb.append("\nNumber of live region servers: " - + (serversSize > 0 ? serversSize : serversNameSize)); + sb.append( + "\nNumber of live region servers: " + (serversSize > 0 ? 
serversSize : serversNameSize)); if (serversSize > 0) { for (ServerName serverName : getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index 6a51db08d8c4..14a8aff3c49d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; @@ -26,7 +24,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.client.RegionStatesCount; import org.apache.hadoop.hbase.master.RegionState; import org.apache.yetus.audience.InterfaceAudience; @@ -45,32 +42,37 @@ *
  * <li>The average cluster load.</li>
  * <li>The number of regions deployed on the cluster.</li>
  * <li>The number of requests since last report.</li>
- * <li>Detailed region server loading and resource usage information,
- * per server and per region.</li>
+ * <li>Detailed region server loading and resource usage information, per server and per
+ * region.</li>
  * <li>Regions in transition at master</li>
  * <li>The unique cluster ID</li>
  * </ul>
  * {@link ClusterMetrics.Option} provides a way to get desired ClusterStatus information.
  * The following codes will get all the cluster information.
+ *
  * <pre>
 - * {@code
 - * // Original version still works
 - * Admin admin = connection.getAdmin();
 - * ClusterStatus status = admin.getClusterStatus();
 - * // or below, a new version which has the same effects
 - * ClusterStatus status = admin.getClusterStatus(EnumSet.allOf(Option.class));
 + * {
 + *   @code
 + *   // Original version still works
 + *   Admin admin = connection.getAdmin();
 + *   ClusterStatus status = admin.getClusterStatus();
 + *   // or below, a new version which has the same effects
 + *   ClusterStatus status = admin.getClusterStatus(EnumSet.allOf(Option.class));
   * }
      * 
 - * If information about live servers is the only wanted.
 - * then codes in the following way:
 + *
 + * If information about live servers is the only wanted. then codes in the following way:
 + *
   * <pre>
 - * {@code
 - * Admin admin = connection.getAdmin();
 - * ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
 + * {
 + *   @code
 + *   Admin admin = connection.getAdmin();
 + *   ClusterStatus status = admin.getClusterStatus(EnumSet.of(Option.LIVE_SERVERS));
   * }
      * 
    - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link ClusterMetrics} instead. + * + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link ClusterMetrics} + * instead. */ @InterfaceAudience.Public @Deprecated @@ -86,26 +88,19 @@ public class ClusterStatus implements ClusterMetrics { */ @Deprecated public ClusterStatus(final String hbaseVersion, final String clusterid, - final Map servers, - final Collection deadServers, - final ServerName master, - final Collection backupMasters, - final List rit, - final String[] masterCoprocessors, - final Boolean balancerOn, + final Map servers, final Collection deadServers, + final ServerName master, final Collection backupMasters, + final List rit, final String[] masterCoprocessors, final Boolean balancerOn, final int masterInfoPort) { // TODO: make this constructor private this(ClusterMetricsBuilder.newBuilder().setHBaseVersion(hbaseVersion) - .setDeadServerNames(new ArrayList<>(deadServers)) - .setLiveServerMetrics(servers.entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()))) - .setBackerMasterNames(new ArrayList<>(backupMasters)).setBalancerOn(balancerOn) - .setClusterId(clusterid) - .setMasterCoprocessorNames(Arrays.asList(masterCoprocessors)) - .setMasterName(master) - .setMasterInfoPort(masterInfoPort) - .setRegionsInTransition(rit) - .build()); + .setDeadServerNames(new ArrayList<>(deadServers)) + .setLiveServerMetrics( + servers.entrySet().stream().collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue()))) + .setBackerMasterNames(new ArrayList<>(backupMasters)).setBalancerOn(balancerOn) + .setClusterId(clusterid).setMasterCoprocessorNames(Arrays.asList(masterCoprocessors)) + .setMasterName(master).setMasterInfoPort(masterInfoPort).setRegionsInTransition(rit) + .build()); } @InterfaceAudience.Private @@ -127,10 +122,10 @@ public Map getLiveServerMetrics() { } /** - * @return the number of region servers in the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLiveServerMetrics()}. - */ + * @return the number of region servers in the cluster + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLiveServerMetrics()}. + */ @Deprecated public int getServersSize() { return metrics.getLiveServerMetrics().size(); @@ -139,8 +134,8 @@ public int getServersSize() { /** * @return the number of dead region servers in the cluster * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-13656). - * Use {@link #getDeadServerNames()}. + * (HBASE-13656). Use + * {@link #getDeadServerNames()}. */ @Deprecated public int getDeadServers() { @@ -149,8 +144,8 @@ public int getDeadServers() { /** * @return the number of dead region servers in the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getDeadServerNames()}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getDeadServerNames()}. */ @Deprecated public int getDeadServersSize() { @@ -159,8 +154,8 @@ public int getDeadServersSize() { /** * @return the number of regions deployed on the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionCount()}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionCount()}. 
*/ @Deprecated public int getRegionsCount() { @@ -169,8 +164,8 @@ public int getRegionsCount() { /** * @return the number of requests since last report - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRequestCount()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRequestCount()} instead. */ @Deprecated public int getRequestsCount() { @@ -202,7 +197,7 @@ public String getHBaseVersion() { private Map getLiveServerLoads() { return metrics.getLiveServerMetrics().entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> new ServerLoad(e.getValue()))); + .collect(Collectors.toMap(e -> e.getKey(), e -> new ServerLoad(e.getValue()))); } @Override @@ -214,14 +209,14 @@ public boolean equals(Object o) { return false; } ClusterStatus other = (ClusterStatus) o; - return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) && - Objects.equal(getLiveServerLoads(), other.getLiveServerLoads()) && - getDeadServerNames().containsAll(other.getDeadServerNames()) && - Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) && - Objects.equal(getMaster(), other.getMaster()) && - getBackupMasters().containsAll(other.getBackupMasters()) && - Objects.equal(getClusterId(), other.getClusterId()) && - getMasterInfoPort() == other.getMasterInfoPort(); + return Objects.equal(getHBaseVersion(), other.getHBaseVersion()) + && Objects.equal(getLiveServerLoads(), other.getLiveServerLoads()) + && getDeadServerNames().containsAll(other.getDeadServerNames()) + && Arrays.equals(getMasterCoprocessors(), other.getMasterCoprocessors()) + && Objects.equal(getMaster(), other.getMaster()) + && getBackupMasters().containsAll(other.getBackupMasters()) + && Objects.equal(getClusterId(), other.getClusterId()) + && getMasterInfoPort() == other.getMasterInfoPort(); } @Override @@ -239,8 +234,8 @@ public byte getVersion() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLiveServerMetrics()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLiveServerMetrics()} instead. */ @Deprecated public Collection getServers() { @@ -250,8 +245,8 @@ public Collection getServers() { /** * Returns detailed information about the current master {@link ServerName}. * @return current master information if it exists - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMasterName} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getMasterName} + * instead. */ @Deprecated public ServerName getMaster() { @@ -260,8 +255,8 @@ public ServerName getMaster() { /** * @return the number of backup masters in the cluster - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getBackupMasterNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getBackupMasterNames} instead. */ @Deprecated public int getBackupMastersSize() { @@ -270,8 +265,8 @@ public int getBackupMastersSize() { /** * @return the names of backup masters - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getBackupMasterNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getBackupMasterNames} instead. 
*/ @Deprecated public List getBackupMasters() { @@ -281,8 +276,8 @@ public List getBackupMasters() { /** * @param sn * @return Server's load or null if not found. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLiveServerMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLiveServerMetrics} instead. */ @Deprecated public ServerLoad getLoad(final ServerName sn) { @@ -300,8 +295,8 @@ public List getMasterCoprocessorNames() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMasterCoprocessorNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getMasterCoprocessorNames} instead. */ @Deprecated public String[] getMasterCoprocessors() { @@ -310,8 +305,8 @@ public String[] getMasterCoprocessors() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLastMajorCompactionTimestamp(TableName)} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLastMajorCompactionTimestamp(TableName)} instead. */ @Deprecated public long getLastMajorCompactionTsForTable(TableName table) { @@ -319,8 +314,8 @@ public long getLastMajorCompactionTsForTable(TableName table) { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLastMajorCompactionTimestamp(byte[])} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLastMajorCompactionTimestamp(byte[])} instead. */ @Deprecated public long getLastMajorCompactionTsForRegion(final byte[] region) { @@ -328,8 +323,7 @@ public long getLastMajorCompactionTsForRegion(final byte[] region) { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean isBalancerOn() { @@ -369,15 +363,15 @@ public String toString() { int backupMastersSize = getBackupMastersSize(); sb.append("\nNumber of backup masters: " + backupMastersSize); if (backupMastersSize > 0) { - for (ServerName serverName: metrics.getBackupMasterNames()) { + for (ServerName serverName : metrics.getBackupMasterNames()) { sb.append("\n " + serverName); } } int serversSize = getServersSize(); int serversNameSize = getServersName().size(); - sb.append("\nNumber of live region servers: " - + (serversSize > 0 ? serversSize : serversNameSize)); + sb.append( + "\nNumber of live region servers: " + (serversSize > 0 ? 
serversSize : serversNameSize)); if (serversSize > 0) { for (ServerName serverName : metrics.getLiveServerMetrics().keySet()) { sb.append("\n " + serverName.getServerName()); @@ -403,7 +397,7 @@ public String toString() { int ritSize = metrics.getRegionStatesInTransition().size(); sb.append("\nNumber of regions in transition: " + ritSize); if (ritSize > 0) { - for (RegionState state: metrics.getRegionStatesInTransition()) { + for (RegionState state : metrics.getRegionStatesInTransition()) { sb.append("\n " + state.toDescriptiveString()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java index 86aca2bc8177..b8b2519dc09f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ConcurrentTableModificationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java index c0d9b603a8ab..eee678d26453 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Coprocessor.java @@ -7,33 +7,28 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase; +import com.google.protobuf.Service; import java.io.IOException; import java.util.Collections; - -import com.google.protobuf.Service; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * Base interface for the 4 coprocessors - MasterCoprocessor, RegionCoprocessor, - * RegionServerCoprocessor, and WALCoprocessor. - * Do NOT implement this interface directly. Unless an implementation implements one (or more) of - * the above mentioned 4 coprocessors, it'll fail to be loaded by any coprocessor host. - * - * Example: - * Building a coprocessor to observe Master operations. + * RegionServerCoprocessor, and WALCoprocessor. Do NOT implement this interface directly. Unless an + * implementation implements one (or more) of the above mentioned 4 coprocessors, it'll fail to be + * loaded by any coprocessor host. Example: Building a coprocessor to observe Master operations. + * *
      * class MyMasterCoprocessor implements MasterCoprocessor {
      *   @Override
    @@ -48,6 +43,7 @@
      * 
   *
   * Building a Service which can be loaded by both Master and RegionServer
 + *
   * <pre>
      * class MyCoprocessorService implements MasterCoprocessor, RegionServerCoprocessor {
      *   @Override
    @@ -75,30 +71,26 @@ public interface Coprocessor {
        * Lifecycle state of a given coprocessor instance.
        */
       enum State {
    -    UNINSTALLED,
    -    INSTALLED,
    -    STARTING,
    -    ACTIVE,
    -    STOPPING,
    -    STOPPED
    +    UNINSTALLED, INSTALLED, STARTING, ACTIVE, STOPPING, STOPPED
       }
     
       /**
        * Called by the {@link CoprocessorEnvironment} during it's own startup to initialize the
        * coprocessor.
        */
    -  default void start(CoprocessorEnvironment env) throws IOException {}
    +  default void start(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
    -   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the
    -   * coprocessor.
    +   * Called by the {@link CoprocessorEnvironment} during it's own shutdown to stop the coprocessor.
        */
    -  default void stop(CoprocessorEnvironment env) throws IOException {}
    +  default void stop(CoprocessorEnvironment env) throws IOException {
    +  }
     
       /**
        * Coprocessor endpoints providing protobuf services should override this method.
    -   * @return Iterable of {@link Service}s or empty collection. Implementations should never
    -   * return null.
    +   * @return Iterable of {@link Service}s or empty collection. Implementations should never return
    +   *         null.
        */
   default Iterable<Service> getServices() {
         return Collections.EMPTY_SET;
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    index 4fab7333dcd9..edbc5f479d6e 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
    @@ -7,16 +7,14 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase;
     
     import org.apache.hadoop.conf.Configuration;
    @@ -46,8 +44,8 @@ public interface CoprocessorEnvironment {
       int getLoadSequence();
     
       /**
    -   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try
    -   *   to set a configuration.
    +   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
    +   *         set a configuration.
        */
       Configuration getConfiguration();
     
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    index 509844e367d8..6ee6299daa56 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DoNotRetryIOException.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    index 76f374c412f0..718d40c2340b 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/DroppedSnapshotException.java
    @@ -7,24 +7,22 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     package org.apache.hadoop.hbase;
     
     import java.io.IOException;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Thrown during flush if the possibility snapshot content was not properly
    - * persisted into store files.  Response should include replay of wal content.
    + * Thrown during flush if the possibility snapshot content was not properly persisted into store
    + * files. Response should include replay of wal content.
      */
     @InterfaceAudience.Public
     public class DroppedSnapshotException extends IOException {
    @@ -43,7 +41,6 @@ public DroppedSnapshotException(String message) {
     
       /**
        * DroppedSnapshotException with cause
    -   *
        * @param message the message for this exception
        * @param cause the cause for this exception
        */
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java
    index 6445be9cfaf8..cc778aff8c4c 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/FailedCloseWALAfterInitializedErrorException.java
    @@ -7,28 +7,24 @@
      * "License"); you may not use this file except in compliance
      * with the License.  You may obtain a copy of the License at
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
    - * Unless required by applicable law or agreed to in writing,
    - * software distributed under the License is distributed on an
    - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    - * KIND, either express or implied.  See the License for the
    - * specific language governing permissions and limitations
    - * under the License.
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
      */
     package org.apache.hadoop.hbase;
     
    -
     import java.io.IOException;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Throw when failed cleanup unsuccessful initialized wal
      */
     @InterfaceAudience.Public
    -public class FailedCloseWALAfterInitializedErrorException
    -  extends IOException {
    +public class FailedCloseWALAfterInitializedErrorException extends IOException {
     
       private static final long serialVersionUID = -5463156587431677322L;
     
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    index 2f21d60878bf..7a187eef41d1 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,8 +18,6 @@
     package org.apache.hadoop.hbase;
     
     import java.util.Map;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
     import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
     import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor;
    @@ -32,30 +29,39 @@
     import org.apache.hadoop.hbase.regionserver.BloomType;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.PrettyPrinter.Unit;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * An HColumnDescriptor contains information about a column family such as the
    - * number of versions, compression settings, etc.
    - *
    - * It is used as input when creating a table or adding a column.
    + * An HColumnDescriptor contains information about a column family such as the number of versions,
    + * compression settings, etc. It is used as input when creating a table or adding a column.
      */
     @InterfaceAudience.Public
     @Deprecated // remove it in 3.0
 public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
    -  public static final String IN_MEMORY_COMPACTION = ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION;
    +  public static final String IN_MEMORY_COMPACTION =
    +      ColumnFamilyDescriptorBuilder.IN_MEMORY_COMPACTION;
       public static final String COMPRESSION = ColumnFamilyDescriptorBuilder.COMPRESSION;
    -  public static final String COMPRESSION_COMPACT = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT;
    -  public static final String COMPRESSION_COMPACT_MAJOR = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MAJOR;
    -  public static final String COMPRESSION_COMPACT_MINOR = ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MINOR;
    +  public static final String COMPRESSION_COMPACT =
    +      ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT;
    +  public static final String COMPRESSION_COMPACT_MAJOR =
    +      ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MAJOR;
    +  public static final String COMPRESSION_COMPACT_MINOR =
    +      ColumnFamilyDescriptorBuilder.COMPRESSION_COMPACT_MINOR;
       public static final String ENCODE_ON_DISK = "ENCODE_ON_DISK";
    -  public static final String DATA_BLOCK_ENCODING = ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING;
    +  public static final String DATA_BLOCK_ENCODING =
    +      ColumnFamilyDescriptorBuilder.DATA_BLOCK_ENCODING;
       public static final String BLOCKCACHE = ColumnFamilyDescriptorBuilder.BLOCKCACHE;
    -  public static final String CACHE_DATA_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE;
    -  public static final String CACHE_INDEX_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
    -  public static final String CACHE_BLOOMS_ON_WRITE = ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
    -  public static final String EVICT_BLOCKS_ON_CLOSE = ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
    +  public static final String CACHE_DATA_ON_WRITE =
    +      ColumnFamilyDescriptorBuilder.CACHE_DATA_ON_WRITE;
    +  public static final String CACHE_INDEX_ON_WRITE =
    +      ColumnFamilyDescriptorBuilder.CACHE_INDEX_ON_WRITE;
    +  public static final String CACHE_BLOOMS_ON_WRITE =
    +      ColumnFamilyDescriptorBuilder.CACHE_BLOOMS_ON_WRITE;
    +  public static final String EVICT_BLOCKS_ON_CLOSE =
    +      ColumnFamilyDescriptorBuilder.EVICT_BLOCKS_ON_CLOSE;
       public static final String CACHE_DATA_IN_L1 = "CACHE_DATA_IN_L1";
    -  public static final String PREFETCH_BLOCKS_ON_OPEN = ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
    +  public static final String PREFETCH_BLOCKS_ON_OPEN =
    +      ColumnFamilyDescriptorBuilder.PREFETCH_BLOCKS_ON_OPEN;
       public static final String BLOCKSIZE = ColumnFamilyDescriptorBuilder.BLOCKSIZE;
       public static final String LENGTH = "LENGTH";
       public static final String TTL = ColumnFamilyDescriptorBuilder.TTL;
    @@ -72,46 +78,62 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable:
    +   * Construct a column descriptor specifying only the family name The other attributes are
    +   * defaulted.
    +   * @param familyName Column family name. Must be 'printable' -- digit or letter -- and may not
    +   *          contain a :
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-18433).
    -   *             Use {@link ColumnFamilyDescriptorBuilder#of(String)}.
    +   *             (HBASE-18433). Use
    +   *             {@link ColumnFamilyDescriptorBuilder#of(String)}.
        */
       @Deprecated
       public HColumnDescriptor(final String familyName) {
    @@ -119,29 +141,26 @@ public HColumnDescriptor(final String familyName) {
       }
     
       /**
    -   * Construct a column descriptor specifying only the family name
    -   * The other attributes are defaulted.
    -   *
    -   * @param familyName Column family name. Must be 'printable' -- digit or
    -   * letter -- and may not contain a :
    +   * Construct a column descriptor specifying only the family name The other attributes are
    +   * defaulted.
    +   * @param familyName Column family name. Must be 'printable' -- digit or letter -- and may not
    +   *          contain a :
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-18433).
    -   *             Use {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
    +   *             (HBASE-18433). Use
    +   *             {@link ColumnFamilyDescriptorBuilder#of(byte[])}.
        */
       @Deprecated
    -  public HColumnDescriptor(final byte [] familyName) {
    +  public HColumnDescriptor(final byte[] familyName) {
         this(new ModifyableColumnFamilyDescriptor(familyName));
       }
     
       /**
    -   * Constructor.
    -   * Makes a deep copy of the supplied descriptor.
    -   * Can make a modifiable descriptor from an UnmodifyableHColumnDescriptor.
    -   *
    +   * Constructor. Makes a deep copy of the supplied descriptor. Can make a modifiable descriptor
    +   * from an UnmodifyableHColumnDescriptor.
        * @param desc The descriptor.
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-18433).
    -   *             Use {@link ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
    +   *             (HBASE-18433). Use
    +   *             {@link ColumnFamilyDescriptorBuilder#copy(ColumnFamilyDescriptor)}.
        */
       @Deprecated
       public HColumnDescriptor(HColumnDescriptor desc) {
    @@ -149,8 +168,7 @@ public HColumnDescriptor(HColumnDescriptor desc) {
       }
     
       protected HColumnDescriptor(HColumnDescriptor desc, boolean deepClone) {
    -    this(deepClone ? new ModifyableColumnFamilyDescriptor(desc)
    -            : desc.delegatee);
    +    this(deepClone ? new ModifyableColumnFamilyDescriptor(desc) : desc.delegatee);
       }
     
       protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
    @@ -160,17 +178,17 @@ protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
       /**
        * @param b Family name.
        * @return b
    -   * @throws IllegalArgumentException If not null and not a legitimate family
    -   * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because
    -   * b can be null when deserializing).  Cannot start with a '.'
    -   * either. Also Family can not be an empty value or equal "recovered.edits".
    +   * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable'
    +   *           and ends in a ':' (Null passes are allowed because b can be null when
    +   *           deserializing). Cannot start with a '.' either. Also Family can not be an empty value
    +   *           or equal "recovered.edits".
        * @deprecated since 2.0.0 and will be removed in 3.0.0. Use
    -   *   {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])} instead.
    +   *             {@link ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])} instead.
        * @see ColumnFamilyDescriptorBuilder#isLegalColumnFamilyName(byte[])
   * @see <a href="https://issues.apache.org/jira/browse/HBASE-18008">HBASE-18008</a>
        */
       @Deprecated
    -  public static byte [] isLegalFamilyName(final byte [] b) {
    +  public static byte[] isLegalFamilyName(final byte[] b) {
         return ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(b);
       }
     
    @@ -178,7 +196,7 @@ protected HColumnDescriptor(ModifyableColumnFamilyDescriptor delegate) {
        * @return Name of this column family
        */
       @Override
    -  public byte [] getName() {
    +  public byte[] getName() {
         return delegatee.getName();
       }
     
    @@ -226,7 +244,7 @@ public HColumnDescriptor setValue(byte[] key, byte[] value) {
       /**
        * @param key Key whose key and value we're to remove from HCD parameters.
        */
    -  public void remove(final byte [] key) {
    +  public void remove(final byte[] key) {
         getDelegateeForModification().removeValue(new Bytes(key));
       }
     
    @@ -243,8 +261,8 @@ public HColumnDescriptor setValue(String key, String value) {
       /**
        * @return compression type being used for the column family
        * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-13655).
    -   *             Use {@link #getCompressionType()}.
    +   *             (HBASE-13655). Use
    +   *             {@link #getCompressionType()}.
        */
       @Deprecated
       public Compression.Algorithm getCompression() {
    @@ -252,10 +270,10 @@ public Compression.Algorithm getCompression() {
       }
     
       /**
    -   *  @return compression type being used for the column family for major compaction
    -   *  @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    -   *             (HBASE-13655).
    -   *             Use {@link #getCompactionCompressionType()}.
    +   * @return compression type being used for the column family for major compaction
    +   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    +   *             (HBASE-13655). Use
    +   *             {@link #getCompactionCompressionType()}.
        */
       @Deprecated
       public Compression.Algorithm getCompactionCompression() {
    @@ -278,7 +296,6 @@ public HColumnDescriptor setMaxVersions(int value) {
     
       /**
        * Set minimum and maximum versions to keep
    -   *
        * @param minVersions minimal number of versions
        * @param maxVersions maximum number of versions
        * @return this (for chained invocation)
    @@ -291,9 +308,9 @@ public HColumnDescriptor setVersions(int minVersions, int maxVersions) {
         }
     
         if (maxVersions < minVersions) {
    -      throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions
    -        + " and set MinVersion to " + minVersions
    -        + ", as maximum versions must be >= minimum versions.");
    +      throw new IllegalArgumentException(
    +          "Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions
    +              + ", as maximum versions must be >= minimum versions.");
         }
         setMinVersions(minVersions);
         setMaxVersions(maxVersions);
    @@ -306,8 +323,7 @@ public int getBlocksize() {
       }
     
       /**
    -   * @param value Blocksize to use when writing out storefiles/hfiles on this
    -   * column family.
    +   * @param value Blocksize to use when writing out storefiles/hfiles on this column family.
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setBlocksize(int value) {
    @@ -326,10 +342,9 @@ public Compression.Algorithm getCompressionType() {
       }
     
       /**
    -   * Compression types supported in hbase.
    -   * LZO is not bundled as part of the hbase distribution.
    -   * See LZO Compression
    -   * for how to enable it.
    +   * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. See
    +   * LZO Compression for how to
    +   * enable it.
        * @param value Compression type setting.
        * @return this (for chained invocation)
        */
    @@ -356,7 +371,6 @@ public HColumnDescriptor setDataBlockEncoding(DataBlockEncoding value) {
       /**
        * Set whether the tags should be compressed along with DataBlockEncoding. When no
        * DataBlockEncoding is been used, this is having no effect.
    -   *
        * @param value
        * @return this (for chained invocation)
        */
    @@ -386,10 +400,9 @@ public Compression.Algorithm getMinorCompactionCompressionType() {
       }
     
       /**
    -   * Compression types supported in hbase.
    -   * LZO is not bundled as part of the hbase distribution.
    -   * See LZO Compression
    -   * for how to enable it.
    +   * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. See
    +   * LZO Compression for how to
    +   * enable it.
        * @param value Compression type setting.
        * @return this (for chained invocation)
        */
    @@ -415,7 +428,7 @@ public boolean isInMemory() {
     
       /**
        * @param value True if we are to favor keeping all values for this column family in the
    -   * HRegionServer cache
    +   *          HRegionServer cache
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setInMemory(boolean value) {
    @@ -429,8 +442,7 @@ public MemoryCompactionPolicy getInMemoryCompaction() {
       }
     
       /**
    -   * @param value the prefered in-memory compaction policy
    -   *                  for this column family
    +   * @param value the prefered in-memory compaction policy for this column family
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setInMemoryCompaction(MemoryCompactionPolicy value) {
    @@ -444,8 +456,7 @@ public KeepDeletedCells getKeepDeletedCells() {
       }
     
       /**
    -   * @param value True if deleted rows should not be collected
    -   * immediately.
    +   * @param value True if deleted rows should not be collected immediately.
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells value) {
    @@ -454,9 +465,9 @@ public HColumnDescriptor setKeepDeletedCells(KeepDeletedCells value) {
       }
     
       /**
    -   * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts
    -   * will mask a later Put with lower ts. Set this to true to enable new semantics of versions.
    -   * We will also consider mvcc in versions. See HBASE-15968 for details.
    +   * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts will
    +   * mask a later Put with lower ts. Set this to true to enable new semantics of versions. We will
    +   * also consider mvcc in versions. See HBASE-15968 for details.
        */
       @Override
       public boolean isNewVersionBehavior() {
    @@ -468,7 +479,6 @@ public HColumnDescriptor setNewVersionBehavior(boolean newVersionBehavior) {
         return this;
       }
     
    -
       @Override
       public int getTimeToLive() {
         return delegatee.getTimeToLive();
    @@ -485,7 +495,7 @@ public HColumnDescriptor setTimeToLive(int value) {
     
       /**
        * @param value Time to live of cell contents, in human readable format
    -   *                   @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
    +   * @see org.apache.hadoop.hbase.util.PrettyPrinter#format(String, Unit)
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setTimeToLive(String value) throws HBaseException {
    @@ -499,8 +509,7 @@ public int getMinVersions() {
       }
     
       /**
    -   * @param value The minimum number of versions to keep.
    -   * (used when timeToLive is set)
    +   * @param value The minimum number of versions to keep. (used when timeToLive is set)
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setMinVersions(int value) {
    @@ -514,8 +523,8 @@ public boolean isBlockCacheEnabled() {
       }
     
       /**
    -   * @param value True if hfile DATA type blocks should be cached (We always cache
    -   * INDEX and BLOOM blocks; you cannot turn this off).
    +   * @param value True if hfile DATA type blocks should be cached (We always cache INDEX and BLOOM
    +   *          blocks; you cannot turn this off).
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setBlockCacheEnabled(boolean value) {
    @@ -542,10 +551,10 @@ public int getScope() {
         return delegatee.getScope();
       }
     
    - /**
    -  * @param value the scope tag
    -  * @return this (for chained invocation)
    -  */
    +  /**
    +   * @param value the scope tag
    +   * @return this (for chained invocation)
    +   */
       public HColumnDescriptor setScope(int value) {
         getDelegateeForModification().setScope(value);
         return this;
    @@ -567,7 +576,6 @@ public HColumnDescriptor setCacheDataOnWrite(boolean value) {
     
       /**
        * This is a noop call from HBase 2.0 onwards
    -   *
        * @return this (for chained invocation)
        * @deprecated Since 2.0 and will be removed in 3.0 with out any replacement. Caching data in on
        *             heap Cache, when there are both on heap LRU Cache and Bucket Cache will no longer
    @@ -612,8 +620,7 @@ public boolean isEvictBlocksOnClose() {
       }
     
       /**
    -   * @param value true if we should evict cached blocks from the blockcache on
    -   * close
    +   * @param value true if we should evict cached blocks from the blockcache on close
        * @return this (for chained invocation)
        */
       public HColumnDescriptor setEvictBlocksOnClose(boolean value) {
    @@ -700,7 +707,7 @@ public byte[] toByteArray() {
        * @throws DeserializationException
        * @see #toByteArray()
        */
    -  public static HColumnDescriptor parseFrom(final byte [] bytes) throws DeserializationException {
    +  public static HColumnDescriptor parseFrom(final byte[] bytes) throws DeserializationException {
         ColumnFamilyDescriptor desc = ColumnFamilyDescriptorBuilder.parseFrom(bytes);
         if (desc instanceof ModifyableColumnFamilyDescriptor) {
           return new HColumnDescriptor((ModifyableColumnFamilyDescriptor) desc);
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    index 2a0e804ff7ab..0b380cae559a 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -24,7 +23,6 @@
     import java.util.Arrays;
     import java.util.List;
     import java.util.stream.Collectors;
    -
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.KeyValue.KVComparator;
     import org.apache.hadoop.hbase.client.RegionInfo;
    @@ -38,40 +36,38 @@
     import org.apache.yetus.audience.InterfaceAudience;
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
    +
     import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
     import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
     
     /**
      * Information about a region. A region is a range of keys in the whole keyspace of a table, an
    - * identifier (a timestamp) for differentiating between subset ranges (after region split)
    - * and a replicaId for differentiating the instance for the same range and some status information
    - * about the region.
    - *
    - * The region has a unique name which consists of the following fields:
    + * identifier (a timestamp) for differentiating between subset ranges (after region split) and a
    + * replicaId for differentiating the instance for the same range and some status information about
    + * the region. The region has a unique name which consists of the following fields:
      * 
      - *
    • tableName : The name of the table
    • - *
    • startKey : The startKey for the region.
    • - *
    • regionId : A timestamp when the region is created.
    • - *
    • replicaId : An id starting from 0 to differentiate replicas of the same region range - * but hosted in separated servers. The same region range can be hosted in multiple locations.
    • - *
    • encodedName : An MD5 encoded string for the region name.
    • + *
    • tableName : The name of the table
    • + *
    • startKey : The startKey for the region.
    • + *
    • regionId : A timestamp when the region is created.
    • + *
    • replicaId : An id starting from 0 to differentiate replicas of the same region range but + * hosted in separated servers. The same region range can be hosted in multiple locations.
    • + *
    • encodedName : An MD5 encoded string for the region name.
    • *
    - * - *
    Other than the fields in the region name, region info contains: + *
    + * Other than the fields in the region name, region info contains: *
      - *
    • endKey : the endKey for the region (exclusive)
    • - *
    • split : Whether the region is split
    • - *
    • offline : Whether the region is offline
    • + *
    • endKey : the endKey for the region (exclusive)
    • + *
    • split : Whether the region is split
    • + *
    • offline : Whether the region is offline
    • *
    - * * In 0.98 or before, a list of table's regions would fully cover the total keyspace, and at any * point in time, a row key always belongs to a single region, which is hosted in a single server. * In 0.99+, a region can have multiple instances (called replicas), and thus a range (or row) can * correspond to multiple HRegionInfo's. These HRI's share the same fields however except the * replicaId field. If the replicaId is not set, it defaults to 0, which is compatible with the * previous behavior of a range corresponding to 1 region. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * use {@link RegionInfoBuilder} to build {@link RegionInfo}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. use + * {@link RegionInfoBuilder} to build {@link RegionInfo}. */ @Deprecated @InterfaceAudience.Public @@ -79,41 +75,32 @@ public class HRegionInfo implements RegionInfo { private static final Logger LOG = LoggerFactory.getLogger(HRegionInfo.class); /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. - * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *

    - * **NOTE** - * - * The first hbase:meta region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. + * The new format for a region name contains its encodedName at the end. The encoded name also + * serves as the directory name for the region in the filesystem. New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. where, <encodedName> + * is a hex version of the MD5 hash of <tablename>,<startkey>,<regionIdTimestamp> The old + * region name format: <tablename>,<startkey>,<regionIdTimestamp> For region names in the + * old format, the encoded name is a 32-bit JenkinsHash integer value (in its decimal notation, + * string form). + *

    + * **NOTE** The first hbase:meta region, and regions created by an older version of HBase (0.20 or + * prior) will continue to use the old region name format. */ /** A non-capture group so that this can be embedded. */ - public static final String ENCODED_REGION_NAME_REGEX = RegionInfoBuilder.ENCODED_REGION_NAME_REGEX; + public static final String ENCODED_REGION_NAME_REGEX = + RegionInfoBuilder.ENCODED_REGION_NAME_REGEX; private static final int MAX_REPLICA_ID = 0xFFFF; /** * @param regionName * @return the encodedName - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#encodeRegionName(byte[])}. */ @Deprecated - public static String encodeRegionName(final byte [] regionName) { + public static String encodeRegionName(final byte[] regionName) { return RegionInfo.encodeRegionName(regionName); } @@ -126,19 +113,19 @@ public String getShortNameToLog() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(RegionInfo...)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(RegionInfo...)}. */ @Deprecated - public static String getShortNameToLog(HRegionInfo...hris) { + public static String getShortNameToLog(HRegionInfo... hris) { return RegionInfo.getShortNameToLog(Arrays.asList(hris)); } /** - * @return Return a String of short, printable names for hris - * (usually encoded name) for us logging. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(List)})}. + * @return Return a String of short, printable names for hris (usually encoded name) + * for us logging. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getShortNameToLog(List)})}. */ @Deprecated public static String getShortNameToLog(final List hris) { @@ -149,9 +136,9 @@ public static String getShortNameToLog(final List hris) { * Use logging. * @param encodedRegionName The encoded regionname. * @return hbase:meta if passed 1028785192 else returns - * encodedRegionName - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#prettyPrint(String)}. + * encodedRegionName + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#prettyPrint(String)}. */ @Deprecated @InterfaceAudience.Private @@ -159,7 +146,7 @@ public static String prettyPrint(final String encodedRegionName) { return RegionInfo.prettyPrint(encodedRegionName); } - private byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + private byte[] endKey = HConstants.EMPTY_BYTE_ARRAY; // This flag is in the parent of a split while the parent is still referenced by daughter regions. // We USED to set this flag when we disabled a table but now table state is kept up in zookeeper // as of 0.90.0 HBase. And now in DisableTableProcedure, finally we will create bunch of @@ -167,14 +154,14 @@ public static String prettyPrint(final String encodedRegionName) { // will not change the offLine flag. 
private boolean offLine = false; private long regionId = -1; - private transient byte [] regionName = HConstants.EMPTY_BYTE_ARRAY; + private transient byte[] regionName = HConstants.EMPTY_BYTE_ARRAY; private boolean split = false; - private byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; + private byte[] startKey = HConstants.EMPTY_BYTE_ARRAY; private int hashCode = -1; - //TODO: Move NO_HASH to HStoreFile which is really the only place it is used. + // TODO: Move NO_HASH to HStoreFile which is really the only place it is used. public static final String NO_HASH = null; private String encodedName = null; - private byte [] encodedNameAsBytes = null; + private byte[] encodedNameAsBytes = null; private int replicaId = DEFAULT_REPLICA_ID; // Current TableName @@ -202,8 +189,7 @@ private void setHashCode() { } /** - * Private constructor used constructing HRegionInfo for the - * first meta regions + * Private constructor used constructing HRegionInfo for the first meta regions */ private HRegionInfo(long regionId, TableName tableName) { this(regionId, tableName, DEFAULT_REPLICA_ID); @@ -225,66 +211,59 @@ public HRegionInfo(final TableName tableName) { /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @throws IllegalArgumentException */ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey) - throws IllegalArgumentException { + throws IllegalArgumentException { this(tableName, startKey, endKey, false); } /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table descriptor * @param startKey first key in region * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. + * @param split true if this region has split and we have daughter regions regions that may or may + * not hold references to this region. * @throws IllegalArgumentException */ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split) - throws IllegalArgumentException { + final boolean split) throws IllegalArgumentException { this(tableName, startKey, endKey, split, EnvironmentEdgeManager.currentTime()); } /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table descriptor * @param startKey first key in region * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. + * @param split true if this region has split and we have daughter regions regions that may or may + * not hold references to this region. * @param regionid Region id to use. 
* @throws IllegalArgumentException */ - public HRegionInfo(final TableName tableName, final byte[] startKey, - final byte[] endKey, final boolean split, final long regionid) - throws IllegalArgumentException { + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, + final boolean split, final long regionid) throws IllegalArgumentException { this(tableName, startKey, endKey, split, regionid, DEFAULT_REPLICA_ID); } /** * Construct HRegionInfo with explicit parameters - * * @param tableName the table descriptor * @param startKey first key in region * @param endKey end of key range - * @param split true if this region has split and we have daughter regions - * regions that may or may not hold references to this region. + * @param split true if this region has split and we have daughter regions regions that may or may + * not hold references to this region. * @param regionid Region id to use. * @param replicaId the replicaId to use * @throws IllegalArgumentException */ - public HRegionInfo(final TableName tableName, final byte[] startKey, - final byte[] endKey, final boolean split, final long regionid, - final int replicaId) - throws IllegalArgumentException { + public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, + final boolean split, final long regionid, final int replicaId) + throws IllegalArgumentException { super(); if (tableName == null) { throw new IllegalArgumentException("TableName cannot be null"); @@ -300,16 +279,14 @@ public HRegionInfo(final TableName tableName, final byte[] startKey, this.regionName = createRegionName(this.tableName, startKey, regionId, replicaId, true); this.split = split; - this.endKey = endKey == null? HConstants.EMPTY_END_ROW: endKey.clone(); - this.startKey = startKey == null? - HConstants.EMPTY_START_ROW: startKey.clone(); + this.endKey = endKey == null ? HConstants.EMPTY_END_ROW : endKey.clone(); + this.startKey = startKey == null ? HConstants.EMPTY_START_ROW : startKey.clone(); this.tableName = tableName; setHashCode(); } /** * Costruct a copy of another HRegionInfo - * * @param other */ public HRegionInfo(RegionInfo other) { @@ -337,16 +314,16 @@ public HRegionInfo(HRegionInfo other, int replicaId) { * @param tableName * @param startKey Can be null * @param regionid Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], long, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], long, boolean)}. 
*/ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final long regionid, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, Long.toString(regionid), newFormat); } @@ -355,16 +332,16 @@ public HRegionInfo(HRegionInfo other, int replicaId) { * @param tableName * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey and id - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], String, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], String, boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final String id, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final String id, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(id), newFormat); } @@ -374,18 +351,18 @@ public HRegionInfo(HRegionInfo other, int replicaId) { * @param startKey Can be null * @param regionid Region id (Usually timestamp from when region was created). * @param replicaId - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey, id and replicaId - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final long regionid, int replicaId, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final long regionid, int replicaId, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), - replicaId, newFormat); + replicaId, newFormat); } /** @@ -393,18 +370,19 @@ public HRegionInfo(HRegionInfo other, int replicaId) { * @param tableName * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). - * @param newFormat should we create the region name in the new format - * (such that it contains its encoded name?). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). 
* @return Region name made of passed tableName, startKey and id - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], byte[], boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final byte [] id, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final byte[] id, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, id, DEFAULT_REPLICA_ID, newFormat); } + /** * Make a region name of passed parameters. * @param tableName @@ -413,13 +391,13 @@ public HRegionInfo(HRegionInfo other, int replicaId) { * @param replicaId * @param newFormat should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}. */ @Deprecated @InterfaceAudience.Private - public static byte [] createRegionName(final TableName tableName, - final byte [] startKey, final byte [] id, final int replicaId, boolean newFormat) { + public static byte[] createRegionName(final TableName tableName, final byte[] startKey, + final byte[] id, final int replicaId, boolean newFormat) { return RegionInfo.createRegionName(tableName, startKey, id, replicaId, newFormat); } @@ -427,11 +405,11 @@ public HRegionInfo(HRegionInfo other, int replicaId) { * Gets the table name from the specified region name. * @param regionName to extract the table name from * @return Table name - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getTable(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getTable(byte[])}. */ @Deprecated - public static TableName getTable(final byte [] regionName) { + public static TableName getTable(final byte[] regionName) { return RegionInfo.getTable(regionName); } @@ -439,8 +417,8 @@ public static TableName getTable(final byte [] regionName) { * Gets the start key from the specified region name. * @param regionName * @return Start key. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#getStartKey(byte[])}. */ @Deprecated public static byte[] getStartKey(final byte[] regionName) throws IOException { @@ -452,22 +430,21 @@ public static byte[] getStartKey(final byte[] regionName) throws IOException { * @param regionName * @return Array of byte[] containing tableName, startKey and id * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#parseRegionName(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#parseRegionName(byte[])}. 
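A short sketch of the RegionInfo statics that these deprecated helpers delegate to; the table name, start key and region id are invented, and the getStartKey/parseRegionName calls are declared to throw IOException:

    byte[] regionName = RegionInfo.createRegionName(
        TableName.valueOf("example_table"), Bytes.toBytes("aaa"), 1600000000000L, true);
    TableName table = RegionInfo.getTable(regionName);       // back to "example_table"
    byte[] startKey = RegionInfo.getStartKey(regionName);    // back to "aaa"
    byte[][] parts = RegionInfo.parseRegionName(regionName); // tableName, startKey and id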
*/ @Deprecated @InterfaceAudience.Private - public static byte [][] parseRegionName(final byte [] regionName) throws IOException { + public static byte[][] parseRegionName(final byte[] regionName) throws IOException { return RegionInfo.parseRegionName(regionName); } /** - * * @param regionName * @return if region name is encoded. * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#isEncodedRegionName(byte[])}. */ @Deprecated public static boolean isEncodedRegionName(byte[] regionName) throws IOException { @@ -476,7 +453,7 @@ public static boolean isEncodedRegionName(byte[] regionName) throws IOException /** @return the regionId */ @Override - public long getRegionId(){ + public long getRegionId() { return regionId; } @@ -485,7 +462,7 @@ public long getRegionId(){ * @see #getRegionNameAsString() */ @Override - public byte [] getRegionName(){ + public byte[] getRegionName() { return regionName; } @@ -517,7 +494,7 @@ public synchronized String getEncodedName() { } @Override - public synchronized byte [] getEncodedNameAsBytes() { + public synchronized byte[] getEncodedNameAsBytes() { if (this.encodedNameAsBytes == null) { this.encodedNameAsBytes = Bytes.toBytes(getEncodedName()); } @@ -528,7 +505,7 @@ public synchronized String getEncodedName() { * @return the startKey */ @Override - public byte [] getStartKey(){ + public byte[] getStartKey() { return startKey; } @@ -536,7 +513,7 @@ public synchronized String getEncodedName() { * @return the endKey */ @Override - public byte [] getEndKey(){ + public byte[] getEndKey() { return endKey; } @@ -547,7 +524,7 @@ public synchronized String getEncodedName() { @Override public TableName getTable() { // This method name should be getTableName but there was already a method getTableName - // that returned a byte array. It is unfortunate given everywhere else, getTableName returns + // that returned a byte array. It is unfortunate given everywhere else, getTableName returns // a TableName instance. if (tableName == null || tableName.getName().length == 0) { tableName = getTable(getRegionName()); @@ -556,24 +533,21 @@ public TableName getTable() { } /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * Returns true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. 
end < start) */ @Override public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); } boolean firstKeyInRange = Bytes.compareTo(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - Bytes.compareTo(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + boolean lastKeyInRange = Bytes.compareTo(rangeEndKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); return firstKeyInRange && lastKeyInRange; } @@ -582,9 +556,8 @@ public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { */ @Override public boolean containsRow(byte[] row) { - return Bytes.compareTo(row, startKey) >= 0 && - (Bytes.compareTo(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + return Bytes.compareTo(row, startKey) >= 0 + && (Bytes.compareTo(row, endKey) < 0 || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } /** @@ -599,7 +572,7 @@ public boolean isMetaTable() { */ @Override public boolean isMetaRegion() { - return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTable()); + return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTable()); } /** @@ -633,8 +606,8 @@ public boolean isOffline() { } /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. + * The parent of a region split is offline while split daughters hold references to the parent. + * Offlined regions are closed. * @param offLine Set online/offline status. */ public void setOffline(boolean offLine) { @@ -667,14 +640,11 @@ public int getReplicaId() { */ @Override public String toString() { - return "{ENCODED => " + getEncodedName() + ", " + - HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + "'" + - (isOffline()? ", OFFLINE => true": "") + - (isSplit()? ", SPLIT => true": "") + - ((replicaId > 0)? ", REPLICA_ID => " + replicaId : "") + "}"; + return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + + ((replicaId > 0) ? ", REPLICA_ID => " + replicaId : "") + "}"; } /** @@ -691,7 +661,7 @@ public boolean equals(Object o) { if (!(o instanceof HRegionInfo)) { return false; } - return this.compareTo((HRegionInfo)o) == 0; + return this.compareTo((HRegionInfo) o) == 0; } /** @@ -704,17 +674,15 @@ public int hashCode() { /** * @return Comparator to use comparing {@link KeyValue}s. - * @deprecated Use Region#getCellComparator(). deprecated for hbase 2.0, remove for hbase 3.0 + * @deprecated Use Region#getCellComparator(). deprecated for hbase 2.0, remove for hbase 3.0 */ @Deprecated public KVComparator getComparator() { - return isMetaRegion()? - KeyValue.META_COMPARATOR: KeyValue.COMPARATOR; + return isMetaRegion() ? 
KeyValue.META_COMPARATOR : KeyValue.COMPARATOR; } /** * Convert a HRegionInfo to the protobuf RegionInfo - * * @return the converted RegionInfo */ HBaseProtos.RegionInfo convert() { @@ -723,12 +691,11 @@ HBaseProtos.RegionInfo convert() { /** * Convert a HRegionInfo to a RegionInfo - * * @param info the HRegionInfo to convert * @return the converted RegionInfo - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo) - * in org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * toRegionInfo(org.apache.hadoop.hbase.client.RegionInfo) in + * org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. */ @Deprecated @InterfaceAudience.Private @@ -738,12 +705,11 @@ public static HBaseProtos.RegionInfo convert(final HRegionInfo info) { /** * Convert a RegionInfo to a HRegionInfo - * * @param proto the RegionInfo to convert * @return the converted HRegionInfo - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use toRegionInfo(HBaseProtos.RegionInfo) - * in org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * toRegionInfo(HBaseProtos.RegionInfo) in + * org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil. */ @Deprecated @InterfaceAudience.Private @@ -753,17 +719,11 @@ public static HRegionInfo convert(final HBaseProtos.RegionInfo proto) { // RegionInfo into HRegionInfo which is what is wanted here. HRegionInfo hri; if (ri.isMetaRegion()) { - hri = ri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID ? - HRegionInfo.FIRST_META_REGIONINFO : - new HRegionInfo(ri.getRegionId(), ri.getTable(), ri.getReplicaId()); + hri = ri.getReplicaId() == RegionInfo.DEFAULT_REPLICA_ID ? HRegionInfo.FIRST_META_REGIONINFO + : new HRegionInfo(ri.getRegionId(), ri.getTable(), ri.getReplicaId()); } else { - hri = new HRegionInfo( - ri.getTable(), - ri.getStartKey(), - ri.getEndKey(), - ri.isSplit(), - ri.getRegionId(), - ri.getReplicaId()); + hri = new HRegionInfo(ri.getTable(), ri.getStartKey(), ri.getEndKey(), ri.isSplit(), + ri.getRegionId(), ri.getReplicaId()); if (proto.hasOffline()) { hri.setOffline(proto.getOffline()); } @@ -774,36 +734,36 @@ public static HRegionInfo convert(final HBaseProtos.RegionInfo proto) { /** * @return This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#toByteArray(RegionInfo)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#toByteArray(RegionInfo)}. */ @Deprecated - public byte [] toByteArray() { + public byte[] toByteArray() { return RegionInfo.toByteArray(this); } /** - * @return A deserialized {@link HRegionInfo} - * or null if we failed deserialize or passed bytes null + * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes + * null * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[])}. 
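To make the containsRange/containsRow contract from a few hunks above concrete, a runnable sketch using the region from that javadoc example ("foo" spanning "a" to "g"); RegionInfoBuilder is assumed from hbase-client:

    RegionInfo foo = RegionInfoBuilder.newBuilder(TableName.valueOf("foo"))
        .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build();
    foo.containsRange(Bytes.toBytes("b"), Bytes.toBytes("c")); // true
    foo.containsRange(Bytes.toBytes("a"), Bytes.toBytes("c")); // true
    foo.containsRange(Bytes.toBytes("b"), Bytes.toBytes("z")); // false, "z" is not before the end key "g"
    foo.containsRow(Bytes.toBytes("f"));                       // true
    // A reversed range such as ("c", "b") throws IllegalArgumentException.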
*/ @Deprecated - public static HRegionInfo parseFromOrNull(final byte [] bytes) { + public static HRegionInfo parseFromOrNull(final byte[] bytes) { if (bytes == null) return null; return parseFromOrNull(bytes, 0, bytes.length); } /** - * @return A deserialized {@link HRegionInfo} or null - * if we failed deserialize or passed bytes null + * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes + * null * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[], int, int)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFromOrNull(byte[], int, int)}. */ @Deprecated - public static HRegionInfo parseFromOrNull(final byte [] bytes, int offset, int len) { + public static HRegionInfo parseFromOrNull(final byte[] bytes, int offset, int len) { if (bytes == null || len <= 0) return null; try { return parseFrom(bytes, offset, len); @@ -817,10 +777,10 @@ public static HRegionInfo parseFromOrNull(final byte [] bytes, int offset, int l * @return A deserialized {@link HRegionInfo} * @throws DeserializationException * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[])}. */ - public static HRegionInfo parseFrom(final byte [] bytes) throws DeserializationException { + public static HRegionInfo parseFrom(final byte[] bytes) throws DeserializationException { if (bytes == null) return null; return parseFrom(bytes, 0, bytes.length); } @@ -832,11 +792,11 @@ public static HRegionInfo parseFrom(final byte [] bytes) throws DeserializationE * @return A deserialized {@link HRegionInfo} * @throws DeserializationException * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#parseFrom(byte[], int, int)}. */ @Deprecated - public static HRegionInfo parseFrom(final byte [] bytes, int offset, int len) + public static HRegionInfo parseFrom(final byte[] bytes, int offset, int len) throws DeserializationException { if (ProtobufUtil.isPBMagicPrefix(bytes, offset, len)) { int pblen = ProtobufUtil.lengthOfPBMagic(); @@ -854,28 +814,27 @@ public static HRegionInfo parseFrom(final byte [] bytes, int offset, int len) } /** - * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use - * the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). + * Use this instead of {@link #toByteArray()} when writing to a stream and you want to use the pb + * mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what you want). * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. * @throws IOException * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#toDelimitedByteArray(RegionInfo)}. 
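A sketch of the serialization round trip and protobuf conversion that the deprecation notes above point to; the RegionInfo statics and the shaded ProtobufUtil are assumed from hbase-client, and the region itself is an invented example:

    RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf("example_table")).build();

    byte[] pb = RegionInfo.toByteArray(ri);             // pb-magic-prefixed bytes
    RegionInfo back = RegionInfo.parseFrom(pb);         // throws DeserializationException on bad input
    RegionInfo orNull = RegionInfo.parseFromOrNull(pb); // returns null instead of throwing

    // org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil (internal API):
    HBaseProtos.RegionInfo proto = ProtobufUtil.toRegionInfo(ri);
    RegionInfo roundTripped = ProtobufUtil.toRegionInfo(proto);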
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#toDelimitedByteArray(RegionInfo)}. */ @Deprecated - public byte [] toDelimitedByteArray() throws IOException { + public byte[] toDelimitedByteArray() throws IOException { return RegionInfo.toDelimitedByteArray(this); } /** - * Get the descriptive name as {@link RegionState} does it but with hidden - * startkey optionally + * Get the descriptive name as {@link RegionState} does it but with hidden startkey optionally * @param state * @param conf * @return descriptive string - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState, Configuration) - * over in hbase-server module. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getDescriptiveNameFromRegionStateForDisplay(RegionState, + * Configuration) over in hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -889,9 +848,9 @@ public static String getDescriptiveNameFromRegionStateForDisplay(RegionState sta * @param hri * @param conf * @return the endkey - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getEndKeyForDisplay(RegionInfo, Configuration) over in + * hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -904,9 +863,9 @@ public static byte[] getEndKeyForDisplay(HRegionInfo hri, Configuration conf) { * @param hri * @param conf * @return the startkey - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getStartKeyForDisplay(RegionInfo, Configuration) over in + * hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -919,9 +878,9 @@ public static byte[] getStartKeyForDisplay(HRegionInfo hri, Configuration conf) * @param hri * @param conf * @return region name as String - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getRegionNameAsStringForDisplay(RegionInfo, Configuration) over + * in hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -934,9 +893,9 @@ public static String getRegionNameAsStringForDisplay(HRegionInfo hri, Configurat * @param hri * @param conf * @return region name bytes - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) - * over in hbase-server module. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * RegionInfoDisplay#getRegionNameForDisplay(RegionInfo, Configuration) over in + * hbase-server module. */ @Deprecated @InterfaceAudience.Private @@ -945,13 +904,13 @@ public static byte[] getRegionNameForDisplay(HRegionInfo hri, Configuration conf } /** - * Parses an HRegionInfo instance from the passed in stream. Presumes the HRegionInfo was + * Parses an HRegionInfo instance from the passed in stream. 
Presumes the HRegionInfo was * serialized to the stream with {@link #toDelimitedByteArray()} * @param in * @return An instance of HRegionInfo. * @throws IOException - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#parseFrom(DataInputStream)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#parseFrom(DataInputStream)}. */ @Deprecated @InterfaceAudience.Private @@ -959,12 +918,12 @@ public static HRegionInfo parseFrom(final DataInputStream in) throws IOException // I need to be able to move back in the stream if this is not a pb serialization so I can // do the Writable decoding instead. int pblen = ProtobufUtil.lengthOfPBMagic(); - byte [] pbuf = new byte[pblen]; - if (in.markSupported()) { //read it with mark() + byte[] pbuf = new byte[pblen]; + if (in.markSupported()) { // read it with mark() in.mark(pblen); } - //assumption: if Writable serialization, it should be longer than pblen. + // assumption: if Writable serialization, it should be longer than pblen. in.readFully(pbuf, 0, pblen); if (ProtobufUtil.isPBMagicPrefix(pbuf)) { return convert(HBaseProtos.RegionInfo.parseDelimitedFrom(in)); @@ -976,14 +935,14 @@ public static HRegionInfo parseFrom(final DataInputStream in) throws IOException /** * Serializes given HRegionInfo's as a byte array. Use this instead of {@link #toByteArray()} when * writing to a stream and you want to use the pb mergeDelimitedFrom (w/o the delimiter, pb reads - * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can - * be used to read back the instances. + * to EOF which may not be what you want). {@link #parseDelimitedFrom(byte[], int, int)} can be + * used to read back the instances. * @param infos HRegionInfo objects to serialize * @return This instance serialized as a delimited protobuf w/ a magic pb prefix. * @throws IOException * @see #toByteArray() - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#toDelimitedByteArray(RegionInfo...)}. */ @Deprecated @InterfaceAudience.Private @@ -998,8 +957,8 @@ public static byte[] toDelimitedByteArray(HRegionInfo... infos) throws IOExcepti * @param offset the start offset into the byte[] buffer * @param length how far we should read into the byte[] buffer * @return All the hregioninfos that are in the byte array. Keeps reading till we hit the end. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionInfo#parseDelimitedFrom(byte[], int, int)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link RegionInfo#parseDelimitedFrom(byte[], int, int)}. */ @Deprecated public static List parseDelimitedFrom(final byte[] bytes, final int offset, @@ -1026,8 +985,8 @@ public static List parseDelimitedFrom(final byte[] bytes, final int * @param regionA * @param regionB * @return true if two regions are adjacent - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link org.apache.hadoop.hbase.client.RegionInfo#areAdjacent(RegionInfo, RegionInfo)}. 
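The delimited form and the adjacency check, sketched with two invented regions of the same table; the RegionInfo statics come from hbase-client, both serialization calls are declared to throw IOException, and java.util.List is needed:

    RegionInfo r1 = RegionInfoBuilder.newBuilder(TableName.valueOf("example_table"))
        .setStartKey(HConstants.EMPTY_START_ROW).setEndKey(Bytes.toBytes("m")).build();
    RegionInfo r2 = RegionInfoBuilder.newBuilder(TableName.valueOf("example_table"))
        .setStartKey(Bytes.toBytes("m")).setEndKey(HConstants.EMPTY_END_ROW).build();

    byte[] delimited = RegionInfo.toDelimitedByteArray(r1, r2);
    List<RegionInfo> restored = RegionInfo.parseDelimitedFrom(delimited, 0, delimited.length);
    boolean adjacent = RegionInfo.areAdjacent(r1, r2);   // true, r1 ends where r2 starts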
*/ @Deprecated public static boolean areAdjacent(HRegionInfo regionA, HRegionInfo regionB) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index fd679bd0cbc4..d14996fb1547 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,17 +23,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Data structure to hold RegionInfo and the address for the hosting - * HRegionServer. Immutable. Comparable, but we compare the 'location' only: - * i.e. the hostname and port, and *not* the regioninfo. This means two - * instances are the same if they refer to the same 'location' (the same - * hostname and port), though they may be carrying different regions. - * - * On a big cluster, each client will have thousands of instances of this object, often - * 100 000 of them if not million. It's important to keep the object size as small - * as possible. - * - *
    This interface has been marked InterfaceAudience.Public in 0.96 and 0.98. + * Data structure to hold RegionInfo and the address for the hosting HRegionServer. Immutable. + * Comparable, but we compare the 'location' only: i.e. the hostname and port, and *not* the + * regioninfo. This means two instances are the same if they refer to the same 'location' (the same + * hostname and port), though they may be carrying different regions. On a big cluster, each client + * will have thousands of instances of this object, often 100 000 of them if not million. It's + * important to keep the object size as small as possible.
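To make the "compare the location only" behavior concrete, a small sketch; the host name, port, start code and table names are invented:

    ServerName sn = ServerName.valueOf("rs1.example.com", 16020, 1600000000000L);
    RegionInfo regionA = RegionInfoBuilder.newBuilder(TableName.valueOf("t1")).build();
    RegionInfo regionB = RegionInfoBuilder.newBuilder(TableName.valueOf("t2")).build();
    HRegionLocation locA = new HRegionLocation(regionA, sn);
    HRegionLocation locB = new HRegionLocation(regionB, sn);
    locA.equals(locB);        // true, same hostname and port even though the regions differ
    locA.getHostnamePort();   // "rs1.example.com:16020"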
    + * This interface has been marked InterfaceAudience.Public in 0.96 and 0.98. */ @InterfaceAudience.Public public class HRegionLocation implements Comparable { @@ -75,7 +70,7 @@ public boolean equals(Object o) { if (!(o instanceof HRegionLocation)) { return false; } - return this.compareTo((HRegionLocation)o) == 0; + return this.compareTo((HRegionLocation) o) == 0; } /** @@ -87,19 +82,18 @@ public int hashCode() { } /** - * * @return Immutable HRegionInfo * @deprecated Since 2.0.0. Will remove in 3.0.0. Use {@link #getRegion()}} instead. */ @Deprecated - public HRegionInfo getRegionInfo(){ + public HRegionInfo getRegionInfo() { return regionInfo == null ? null : new ImmutableHRegionInfo(regionInfo); } /** * @return regionInfo */ - public RegionInfo getRegion(){ + public RegionInfo getRegion() { return regionInfo; } @@ -116,8 +110,8 @@ public long getSeqNum() { } /** - * @return String made of hostname and port formatted as - * per {@link Addressing#createHostAndPortStr(String, int)} + * @return String made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 8f9e77ac6488..ae588ff98245 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,12 +41,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * HTableDescriptor contains the details about an HBase table such as the descriptors of - * all the column families, is the table a catalog table, hbase:meta , - * if the table is read only, the maximum size of the memstore, - * when the region split should occur, coprocessors associated with it etc... - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link TableDescriptorBuilder} to build {@link HTableDescriptor}. + * HTableDescriptor contains the details about an HBase table such as the descriptors of all the + * column families, is the table a catalog table, hbase:meta , if the table is read + * only, the maximum size of the memstore, when the region split should occur, coprocessors + * associated with it etc... + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link TableDescriptorBuilder} to build {@link HTableDescriptor}. */ @Deprecated @InterfaceAudience.Public @@ -66,7 +65,8 @@ public class HTableDescriptor implements TableDescriptor, ComparableHADOOP-1581 HBASE: (HBASE-174) Un-openable tablename bug + * @see HADOOP-1581 HBASE: (HBASE-174) + * Un-openable tablename bug */ public HTableDescriptor(final TableName name) { this(new ModifyableTableDescriptor(name)); @@ -94,8 +99,8 @@ public HTableDescriptor(final TableName name) { /** * Construct a table descriptor by cloning the descriptor passed as a parameter. *
<p>
    - * Makes a deep copy of the supplied descriptor. - * Can make a modifiable descriptor from an ImmutableHTableDescriptor. + * Makes a deep copy of the supplied descriptor. Can make a modifiable descriptor from an + * ImmutableHTableDescriptor. * @param desc The descriptor. */ public HTableDescriptor(final HTableDescriptor desc) { @@ -103,8 +108,7 @@ public HTableDescriptor(final HTableDescriptor desc) { } protected HTableDescriptor(final HTableDescriptor desc, boolean deepClone) { - this(deepClone ? new ModifyableTableDescriptor(desc.getTableName(), desc) - : desc.delegatee); + this(deepClone ? new ModifyableTableDescriptor(desc.getTableName(), desc) : desc.delegatee); } public HTableDescriptor(final TableDescriptor desc) { @@ -112,11 +116,11 @@ public HTableDescriptor(final TableDescriptor desc) { } /** - * Construct a table descriptor by cloning the descriptor passed as a parameter - * but using a different table name. + * Construct a table descriptor by cloning the descriptor passed as a parameter but using a + * different table name. *
<p>
    - * Makes a deep copy of the supplied descriptor. - * Can make a modifiable descriptor from an ImmutableHTableDescriptor. + * Makes a deep copy of the supplied descriptor. Can make a modifiable descriptor from an + * ImmutableHTableDescriptor. * @param name Table name. * @param desc The descriptor. */ @@ -130,7 +134,6 @@ protected HTableDescriptor(ModifyableTableDescriptor delegatee) { /** * This is vestigial API. It will be removed in 3.0. - * * @return always return the false */ public boolean isRootRegion() { @@ -138,11 +141,8 @@ public boolean isRootRegion() { } /** - * Checks if this table is hbase:meta - * region. - * - * @return true if this table is hbase:meta - * region + * Checks if this table is hbase:meta region. + * @return true if this table is hbase:meta region */ @Override public boolean isMetaRegion() { @@ -151,7 +151,6 @@ public boolean isMetaRegion() { /** * Checks if the table is a hbase:meta table - * * @return true if table is hbase:meta region. */ @Override @@ -169,7 +168,6 @@ public Map getValues() { /** * Setter for storing metadata as a (key, value) pair in map - * * @param key The key. * @param value The value. If null, removes the setting. */ @@ -180,7 +178,6 @@ public HTableDescriptor setValue(byte[] key, byte[] value) { /* * Setter for storing metadata as a (key, value) pair in map - * * @param key The key. * @param value The value. If null, removes the setting. */ @@ -191,7 +188,6 @@ public HTableDescriptor setValue(final Bytes key, final Bytes value) { /** * Setter for storing metadata as a (key, value) pair in map - * * @param key The key. * @param value The value. If null, removes the setting. */ @@ -202,9 +198,7 @@ public HTableDescriptor setValue(String key, String value) { /** * Remove metadata represented by the key from the map - * - * @param key Key whose key and value we're to remove from HTableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from HTableDescriptor parameters. */ public void remove(final String key) { getDelegateeForModification().removeValue(Bytes.toBytes(key)); @@ -212,9 +206,7 @@ public void remove(final String key) { /** * Remove metadata represented by the key from the map - * - * @param key Key whose key and value we're to remove from HTableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from HTableDescriptor parameters. */ public void remove(Bytes key) { getDelegateeForModification().removeValue(key); @@ -222,18 +214,15 @@ public void remove(Bytes key) { /** * Remove metadata represented by the key from the map - * - * @param key Key whose key and value we're to remove from HTableDescriptor - * parameters. + * @param key Key whose key and value we're to remove from HTableDescriptor parameters. */ - public void remove(final byte [] key) { + public void remove(final byte[] key) { getDelegateeForModification().removeValue(key); } /** - * Check if the readOnly flag of the table is set. If the readOnly flag is - * set then the contents of the table can only be read from but not modified. - * + * Check if the readOnly flag of the table is set. If the readOnly flag is set then the contents + * of the table can only be read from but not modified. * @return true if all columns in the table should be read only */ @Override @@ -242,12 +231,10 @@ public boolean isReadOnly() { } /** - * Setting the table as read only sets all the columns in the table as read - * only. 
By default all tables are modifiable, but if the readOnly flag is - * set to true then the contents of the table can only be read but not modified. - * - * @param readOnly True if all of the columns in the table should be read - * only. + * Setting the table as read only sets all the columns in the table as read only. By default all + * tables are modifiable, but if the readOnly flag is set to true then the contents of the table + * can only be read but not modified. + * @param readOnly True if all of the columns in the table should be read only. */ public HTableDescriptor setReadOnly(final boolean readOnly) { getDelegateeForModification().setReadOnly(readOnly); @@ -255,9 +242,8 @@ public HTableDescriptor setReadOnly(final boolean readOnly) { } /** - * Check if the compaction enable flag of the table is true. If flag is - * false then no minor/major compactions will be done in real. - * + * Check if the compaction enable flag of the table is true. If flag is false then no minor/major + * compactions will be done in real. * @return true if table compaction enabled */ @Override @@ -267,7 +253,6 @@ public boolean isCompactionEnabled() { /** * Setting the table compaction enable flag. - * * @param isEnable True if enable compaction. */ public HTableDescriptor setCompactionEnabled(final boolean isEnable) { @@ -276,9 +261,8 @@ public HTableDescriptor setCompactionEnabled(final boolean isEnable) { } /** - * Check if the region split enable flag of the table is true. If flag is - * false then no split will be done. - * + * Check if the region split enable flag of the table is true. If flag is false then no split will + * be done. * @return true if table region split enabled */ @Override @@ -288,7 +272,6 @@ public boolean isSplitEnabled() { /** * Setting the table region split enable flag. - * * @param isEnable True if enable split. */ public HTableDescriptor setSplitEnabled(final boolean isEnable) { @@ -296,11 +279,9 @@ public HTableDescriptor setSplitEnabled(final boolean isEnable) { return this; } - /** - * Check if the region merge enable flag of the table is true. If flag is - * false then no merge will be done. - * + * Check if the region merge enable flag of the table is true. If flag is false then no merge will + * be done. * @return true if table region merge enabled */ @Override @@ -310,7 +291,6 @@ public boolean isMergeEnabled() { /** * Setting the table region merge enable flag. - * * @param isEnable True if enable merge. */ public HTableDescriptor setMergeEnabled(final boolean isEnable) { @@ -319,9 +299,8 @@ public HTableDescriptor setMergeEnabled(final boolean isEnable) { } /** - * Check if normalization enable flag of the table is true. If flag is - * false then no region normalizer won't attempt to normalize this table. - * + * Check if normalization enable flag of the table is true. If flag is false then no region + * normalizer won't attempt to normalize this table. * @return true if region normalization is enabled for this table */ @Override @@ -331,7 +310,6 @@ public boolean isNormalizationEnabled() { /** * Setting the table normalization enable flag. - * * @param isEnable True if enable normalization. 
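Because this class is deprecated in favor of TableDescriptorBuilder, the same flags are normally set through the builder; a sketch with an invented table and family name:

    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .setReadOnly(false)
        .setCompactionEnabled(true)
        .setSplitEnabled(true)
        .setMergeEnabled(true)
        .setNormalizationEnabled(false)
        .build();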
*/ public HTableDescriptor setNormalizationEnabled(final boolean isEnable) { @@ -379,7 +357,6 @@ public Durability getDurability() { /** * Get the name of the table - * * @return TableName */ @Override @@ -389,7 +366,6 @@ public TableName getTableName() { /** * Get the name of the table as a String - * * @return name of table as a String */ public String getNameAsString() { @@ -397,9 +373,9 @@ public String getNameAsString() { } /** - * This sets the class associated with the region split policy which - * determines when a region split should occur. The class used by - * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy + * This sets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy * @param clazz the class name */ public HTableDescriptor setRegionSplitPolicyClassName(String clazz) { @@ -408,46 +384,40 @@ public HTableDescriptor setRegionSplitPolicyClassName(String clazz) { } /** - * This gets the class associated with the region split policy which - * determines when a region split should occur. The class used by - * default is defined in org.apache.hadoop.hbase.regionserver.RegionSplitPolicy - * - * @return the class name of the region split policy for this table. - * If this returns null, the default split policy is used. + * This gets the class associated with the region split policy which determines when a region + * split should occur. The class used by default is defined in + * org.apache.hadoop.hbase.regionserver.RegionSplitPolicy + * @return the class name of the region split policy for this table. If this returns null, the + * default split policy is used. */ @Override - public String getRegionSplitPolicyClassName() { + public String getRegionSplitPolicyClassName() { return delegatee.getRegionSplitPolicyClassName(); } /** - * Returns the maximum size upto which a region can grow to after which a region - * split is triggered. The region size is represented by the size of the biggest - * store file in that region. - * + * Returns the maximum size upto which a region can grow to after which a region split is + * triggered. The region size is represented by the size of the biggest store file in that region. * @return max hregion size for table, -1 if not set. - * * @see #setMaxFileSize(long) */ - @Override + @Override public long getMaxFileSize() { return delegatee.getMaxFileSize(); } /** - * Sets the maximum size upto which a region can grow to after which a region - * split is triggered. The region size is represented by the size of the biggest - * store file in that region, i.e. If the biggest store file grows beyond the - * maxFileSize, then the region split is triggered. This defaults to a value of - * 256 MB. + * Sets the maximum size upto which a region can grow to after which a region split is triggered. + * The region size is represented by the size of the biggest store file in that region, i.e. If + * the biggest store file grows beyond the maxFileSize, then the region split is triggered. This + * defaults to a value of 256 MB. *
<p>
    - * This is not an absolute value and might vary. Assume that a single row exceeds - * the maxFileSize then the storeFileSize will be greater than maxFileSize since - * a single row cannot be split across multiple regions + * This is not an absolute value and might vary. Assume that a single row exceeds the maxFileSize + * then the storeFileSize will be greater than maxFileSize since a single row cannot be split + * across multiple regions *
<p>
    - * - * @param maxFileSize The maximum file size that a store file can grow to - * before a split is triggered. + * @param maxFileSize The maximum file size that a store file can grow to before a split is + * triggered. */ public HTableDescriptor setMaxFileSize(long maxFileSize) { getDelegateeForModification().setMaxFileSize(maxFileSize); @@ -461,9 +431,7 @@ public HTableDescriptor setMaxFileSize(String maxFileSize) throws HBaseException /** * Returns the size of the memstore after which a flush to filesystem is triggered. - * * @return memory cache flush size for each hregion, -1 if not set. - * * @see #setMemStoreFlushSize(long) */ @Override @@ -472,9 +440,8 @@ public long getMemStoreFlushSize() { } /** - * Represents the maximum size of the memstore after which the contents of the - * memstore are flushed to the filesystem. This defaults to a size of 64 MB. - * + * Represents the maximum size of the memstore after which the contents of the memstore are + * flushed to the filesystem. This defaults to a size of 64 MB. * @param memstoreFlushSize memory cache flush size for each hregion */ public HTableDescriptor setMemStoreFlushSize(long memstoreFlushSize) { @@ -511,8 +478,8 @@ public String getFlushPolicyClassName() { } /** - * Adds a column family. - * For the updating purpose please use {@link #modifyFamily(HColumnDescriptor)} instead. + * Adds a column family. For the updating purpose please use + * {@link #modifyFamily(HColumnDescriptor)} instead. * @param family HColumnDescriptor of family to add. */ public HTableDescriptor addFamily(final HColumnDescriptor family) { @@ -535,13 +502,12 @@ public HTableDescriptor modifyFamily(final HColumnDescriptor family) { * @param familyName Family name or column name. * @return true if the table contains the specified family name */ - public boolean hasFamily(final byte [] familyName) { + public boolean hasFamily(final byte[] familyName) { return delegatee.hasColumnFamily(familyName); } /** - * @return Name of this table and then a map of all of the column family - * descriptors. + * @return Name of this table and then a map of all of the column family descriptors. * @see #getNameAsString() */ @Override @@ -550,8 +516,8 @@ public String toString() { } /** - * @return Name of this table and then a map of all of the column family - * descriptors (with only the non-default column family attributes) + * @return Name of this table and then a map of all of the column family descriptors (with only + * the non-default column family attributes) */ @Override public String toStringCustomizedValues() { @@ -562,16 +528,14 @@ public String toStringCustomizedValues() { * @return map of all table attributes formatted into string. */ public String toStringTableAttributes() { - return delegatee.toStringTableAttributes(); + return delegatee.toStringTableAttributes(); } /** - * Compare the contents of the descriptor with another one passed as a parameter. - * Checks if the obj passed is an instance of HTableDescriptor, if yes then the - * contents of the descriptors are compared. - * + * Compare the contents of the descriptor with another one passed as a parameter. Checks if the + * obj passed is an instance of HTableDescriptor, if yes then the contents of the descriptors are + * compared. 
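Pulling the sizing and family methods above into one sketch; the sizes and names are illustrative, not recommendations:

    HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("example_table"));
    htd.setMaxFileSize(10L * 1024 * 1024 * 1024);   // split once the biggest store file passes roughly 10 GB
    htd.setMemStoreFlushSize(128L * 1024 * 1024);   // flush the memstore to the filesystem at 128 MB
    htd.addFamily(new HColumnDescriptor("cf"));
    boolean present = htd.hasFamily(Bytes.toBytes("cf")); // true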
* @return true if the contents of the the two descriptors exactly match - * * @see java.lang.Object#equals(java.lang.Object) */ @Override @@ -596,11 +560,10 @@ public int hashCode() { // Comparable /** - * Compares the descriptor with another descriptor which is passed as a parameter. - * This compares the content of the two descriptors and not the reference. - * - * @return 0 if the contents of the descriptors are exactly matching, - * 1 if there is a mismatch in the contents + * Compares the descriptor with another descriptor which is passed as a parameter. This compares + * the content of the two descriptors and not the reference. + * @return 0 if the contents of the descriptors are exactly matching, 1 if there is a mismatch in + * the contents */ @Override public int compareTo(final HTableDescriptor other) { @@ -608,19 +571,17 @@ public int compareTo(final HTableDescriptor other) { } /** - * Returns an unmodifiable collection of all the {@link HColumnDescriptor} - * of all the column families of the table. + * Returns an unmodifiable collection of all the {@link HColumnDescriptor} of all the column + * families of the table. * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getColumnFamilies()} instead. - * @return Immutable collection of {@link HColumnDescriptor} of all the - * column families. + * @return Immutable collection of {@link HColumnDescriptor} of all the column families. * @see #getColumnFamilies() * @see HBASE-18008 */ @Deprecated public Collection getFamilies() { - return Stream.of(delegatee.getColumnFamilies()) - .map(this::toHColumnDescriptor) - .collect(Collectors.toList()); + return Stream.of(delegatee.getColumnFamilies()).map(this::toHColumnDescriptor) + .collect(Collectors.toList()); } /** @@ -641,8 +602,8 @@ public HTableDescriptor setRegionReplication(int regionReplication) { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #hasRegionMemStoreReplication()} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #hasRegionMemStoreReplication()} instead */ @Deprecated public boolean hasRegionMemstoreReplication() { @@ -658,8 +619,8 @@ public boolean hasRegionMemStoreReplication() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setRegionMemStoreReplication(boolean)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setRegionMemStoreReplication(boolean)} instead */ @Deprecated public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication) { @@ -667,13 +628,11 @@ public HTableDescriptor setRegionMemstoreReplication(boolean memstoreReplication } /** - * Enable or Disable the memstore replication from the primary region to the replicas. - * The replication will be used only for meta operations (e.g. flush, compaction, ...) - * - * @param memstoreReplication true if the new data written to the primary region - * should be replicated. - * false if the secondaries can tollerate to have new - * data only when the primary flushes the memstore. + * Enable or Disable the memstore replication from the primary region to the replicas. The + * replication will be used only for meta operations (e.g. flush, compaction, ...) + * @param memstoreReplication true if the new data written to the primary region should be + * replicated. false if the secondaries can tollerate to have new data only when the + * primary flushes the memstore. 
*/ public HTableDescriptor setRegionMemStoreReplication(boolean memstoreReplication) { getDelegateeForModification().setRegionMemStoreReplication(memstoreReplication); @@ -691,15 +650,13 @@ public int getPriority() { } /** - * Returns all the column family names of the current table. The map of - * HTableDescriptor contains mapping of family name to HColumnDescriptors. - * This returns all the keys of the family map which represents the column - * family names of the table. - * + * Returns all the column family names of the current table. The map of HTableDescriptor contains + * mapping of family name to HColumnDescriptors. This returns all the keys of the family map which + * represents the column family names of the table. * @return Immutable sorted set of the keys of the families. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-18008). - * Use {@link #getColumnFamilyNames()}. + * (HBASE-18008). Use + * {@link #getColumnFamilyNames()}. */ @Deprecated public Set getFamiliesKeys() { @@ -708,7 +665,6 @@ public Set getFamiliesKeys() { /** * Returns the count of the column families of the table. - * * @return Count of column families of the table */ @Override @@ -717,9 +673,7 @@ public int getColumnFamilyCount() { } /** - * Returns an array all the {@link HColumnDescriptor} of the column families - * of the table. - * + * Returns an array all the {@link HColumnDescriptor} of the column families of the table. * @return Array of all the HColumnDescriptors of the current table * @deprecated since 2.0.0 and will be removed in 3.0.0. * @see #getFamilies() @@ -728,19 +682,17 @@ public int getColumnFamilyCount() { @Deprecated @Override public HColumnDescriptor[] getColumnFamilies() { - return Stream.of(delegatee.getColumnFamilies()) - .map(this::toHColumnDescriptor) - .toArray(size -> new HColumnDescriptor[size]); + return Stream.of(delegatee.getColumnFamilies()).map(this::toHColumnDescriptor) + .toArray(size -> new HColumnDescriptor[size]); } /** - * Returns the HColumnDescriptor for a specific column family with name as - * specified by the parameter column. + * Returns the HColumnDescriptor for a specific column family with name as specified by the + * parameter column. * @param column Column family name - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getColumnFamily(byte[])} - * instead. + * instead. * @see #getColumnFamily(byte[]) * @see HBASE-18008 */ @@ -749,16 +701,13 @@ public HColumnDescriptor getFamily(final byte[] column) { return toHColumnDescriptor(delegatee.getColumnFamily(column)); } - /** - * Removes the HColumnDescriptor with name specified by the parameter column - * from the table descriptor - * + * Removes the HColumnDescriptor with name specified by the parameter column from the table + * descriptor * @param column Name of the column family to be removed. - * @return Column descriptor for the passed family name or the family on - * passed in column. + * @return Column descriptor for the passed family name or the family on passed in column. 
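A sketch of the replica-related setters and the family accessors, continuing the htd sketch above (three replicas is just an example figure):

    htd.setRegionReplication(3);             // host three replicas of each region
    htd.setRegionMemStoreReplication(true);  // secondaries see new data without waiting for a primary flush
    for (HColumnDescriptor hcd : htd.getColumnFamilies()) {
      // inspect or adjust each family here
    }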
*/ - public HColumnDescriptor removeFamily(final byte [] column) { + public HColumnDescriptor removeFamily(final byte[] column) { return toHColumnDescriptor(getDelegateeForModification().removeColumnFamily(column)); } @@ -780,11 +729,10 @@ protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc) { } /** - * Add a table coprocessor to this table. The coprocessor - * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. - * It won't check if the class can be loaded or not. - * Whether a coprocessor is loadable or not will be determined when - * a region is opened. + * Add a table coprocessor to this table. The coprocessor type must be + * org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be + * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is + * opened. * @param className Full class name. * @throws IOException */ @@ -794,38 +742,32 @@ public HTableDescriptor addCoprocessor(String className) throws IOException { } /** - * Add a table coprocessor to this table. The coprocessor - * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. - * It won't check if the class can be loaded or not. - * Whether a coprocessor is loadable or not will be determined when - * a region is opened. - * @param jarFilePath Path of the jar file. If it's null, the class will be - * loaded from default classloader. + * Add a table coprocessor to this table. The coprocessor type must be + * org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be + * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is + * opened. + * @param jarFilePath Path of the jar file. If it's null, the class will be loaded from default + * classloader. * @param className Full class name. * @param priority Priority * @param kvs Arbitrary key-value parameter pairs passed into the coprocessor. * @throws IOException */ - public HTableDescriptor addCoprocessor(String className, Path jarFilePath, - int priority, final Map kvs) - throws IOException { - getDelegateeForModification().setCoprocessor( - CoprocessorDescriptorBuilder.newBuilder(className) - .setJarPath(jarFilePath == null ? null : jarFilePath.toString()) - .setPriority(priority) - .setProperties(kvs == null ? Collections.emptyMap() : kvs) - .build()); + public HTableDescriptor addCoprocessor(String className, Path jarFilePath, int priority, + final Map kvs) throws IOException { + getDelegateeForModification().setCoprocessor(CoprocessorDescriptorBuilder.newBuilder(className) + .setJarPath(jarFilePath == null ? null : jarFilePath.toString()).setPriority(priority) + .setProperties(kvs == null ? Collections.emptyMap() : kvs).build()); return this; } /** - * Add a table coprocessor to this table. The coprocessor - * type must be org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. - * It won't check if the class can be loaded or not. - * Whether a coprocessor is loadable or not will be determined when - * a region is opened. + * Add a table coprocessor to this table. The coprocessor type must be + * org.apache.hadoop.hbase.coprocessor.RegionCoprocessor. It won't check if the class can be + * loaded or not. Whether a coprocessor is loadable or not will be determined when a region is + * opened. 
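The coprocessor attachment described above, sketched with an invented observer class and jar path; both addCoprocessor overloads are declared to throw IOException, and the snippet needs org.apache.hadoop.fs.Path, org.apache.hadoop.hbase.Coprocessor and java.util.Collections:

    htd.addCoprocessor("org.example.MyRegionObserver");   // class resolved on the region servers' classpath
    htd.addCoprocessor("org.example.MyRegionObserver",
        new Path("hdfs:///hbase/cp/my-observer.jar"),      // invented jar location
        Coprocessor.PRIORITY_USER,
        Collections.singletonMap("arg", "value"));         // arbitrary key-value parameters for the coprocessor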
* @param specStr The Coprocessor specification all in in one String formatted so matches - * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} + * {@link HConstants#CP_HTD_ATTR_VALUE_PATTERN} * @throws IOException */ public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOException { @@ -835,7 +777,6 @@ public HTableDescriptor addCoprocessorWithSpec(final String specStr) throws IOEx /** * Check if the table has an attached co-processor represented by the name className - * * @param classNameToMatch - Class name of the co-processor * @return true of the table has a co-processor className */ @@ -858,12 +799,14 @@ public void removeCoprocessor(String className) { } public final static String NAMESPACE_FAMILY_INFO = TableDescriptorBuilder.NAMESPACE_FAMILY_INFO; - public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES; - public final static byte[] NAMESPACE_COL_DESC_BYTES = TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES; + public final static byte[] NAMESPACE_FAMILY_INFO_BYTES = + TableDescriptorBuilder.NAMESPACE_FAMILY_INFO_BYTES; + public final static byte[] NAMESPACE_COL_DESC_BYTES = + TableDescriptorBuilder.NAMESPACE_COL_DESC_BYTES; /** Table descriptor for namespace table */ - public static final HTableDescriptor NAMESPACE_TABLEDESC - = new HTableDescriptor(TableDescriptorBuilder.NAMESPACE_TABLEDESC); + public static final HTableDescriptor NAMESPACE_TABLEDESC = + new HTableDescriptor(TableDescriptorBuilder.NAMESPACE_TABLEDESC); /** * @deprecated since 0.94.1 @@ -911,8 +854,8 @@ public byte[] toByteArray() { * @throws IOException * @see #toByteArray() */ - public static HTableDescriptor parseFrom(final byte [] bytes) - throws DeserializationException, IOException { + public static HTableDescriptor parseFrom(final byte[] bytes) + throws DeserializationException, IOException { TableDescriptor desc = TableDescriptorBuilder.parseFrom(bytes); if (desc instanceof ModifyableTableDescriptor) { return new HTableDescriptor((ModifyableTableDescriptor) desc); @@ -932,11 +875,9 @@ public String getConfigurationValue(String key) { * Getter for fetching an unmodifiable map. */ public Map getConfiguration() { - return delegatee.getValues().entrySet().stream() - .collect(Collectors.toMap( - e -> Bytes.toString(e.getKey().get(), e.getKey().getOffset(), e.getKey().getLength()), - e -> Bytes.toString(e.getValue().get(), e.getValue().getOffset(), e.getValue().getLength()) - )); + return delegatee.getValues().entrySet().stream().collect(Collectors.toMap( + e -> Bytes.toString(e.getKey().get(), e.getKey().getOffset(), e.getKey().getLength()), + e -> Bytes.toString(e.getValue().get(), e.getValue().getOffset(), e.getValue().getLength()))); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java index 63c26e2c393f..2a099157bc76 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a request is table schema modification is requested but - * made for an invalid family name. 
+ * Thrown if a table schema modification request is made for an invalid family + * name. */ @InterfaceAudience.Public public class InvalidFamilyOperationException extends DoNotRetryIOException { private static final long serialVersionUID = (1L << 22) - 1L; + /** default constructor */ public InvalidFamilyOperationException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java index dd19fa1c2279..2ae80cade98a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/KeepDeletedCells.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,27 +23,25 @@ * Ways to keep cells marked for delete around. */ /* - * Don't change the TRUE/FALSE labels below, these have to be called - * this way for backwards compatibility. + * Don't change the TRUE/FALSE labels below, these have to be called this way for backwards + * compatibility. */ @InterfaceAudience.Public public enum KeepDeletedCells { /** Deleted Cells are not retained. */ FALSE, /** - * Deleted Cells are retained until they are removed by other means - * such TTL or VERSIONS. - * If no TTL is specified or no new versions of delete cells are - * written, they are retained forever. + * Deleted Cells are retained until they are removed by other means such as TTL or VERSIONS. If no + * TTL is specified or no new versions of delete cells are written, they are retained forever. */ TRUE, /** - * Deleted Cells are retained until the delete marker expires due to TTL. - * This is useful when TTL is combined with MIN_VERSIONS and one - * wants to keep a minimum number of versions around but at the same - * time remove deleted cells after the TTL. + * Deleted Cells are retained until the delete marker expires due to TTL. This is useful when TTL + * is combined with MIN_VERSIONS and one wants to keep a minimum number of versions around but at + * the same time remove deleted cells after the TTL. */ TTL; + public static KeepDeletedCells getValue(String val) { return valueOf(val.toUpperCase()); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java index 35cdecba9bb6..86e394e33403 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
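To illustrate the KeepDeletedCells modes described above, a column family might be configured through the standard 2.x builder API; a minimal sketch, assuming a made-up family name and a one-day TTL:

import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class KeepDeletedCellsSketch {
  static ColumnFamilyDescriptor ttlRetainingFamily() {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMinVersions(1)                         // always keep at least one version
        .setTimeToLive(86400)                      // TTL in seconds
        .setKeepDeletedCells(KeepDeletedCells.TTL) // deleted cells survive until the TTL expires
        .build();
  }
}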
See the NOTICE file * distributed with this work for additional information @@ -26,6 +25,7 @@ @InterfaceAudience.Public public class MasterNotRunningException extends HBaseIOException { private static final long serialVersionUID = (1L << 23) - 1L; + /** default constructor */ public MasterNotRunningException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java index 099ea4054591..b913ac0506cd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MemoryCompactionPolicy.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,16 +30,15 @@ public enum MemoryCompactionPolicy { NONE, /** * Basic policy applies optimizations which modify the index to a more compacted representation. - * This is beneficial in all access patterns. The smaller the cells are the greater the - * benefit of this policy. - * This is the default policy. + * This is beneficial in all access patterns. The smaller the cells are the greater the benefit of + * this policy. This is the default policy. */ BASIC, /** - * In addition to compacting the index representation as the basic policy, eager policy - * eliminates duplication while the data is still in memory (much like the - * on-disk compaction does after the data is flushed to disk). This policy is most useful for - * applications with high data churn or small working sets. + * In addition to compacting the index representation as the basic policy, eager policy eliminates + * duplication while the data is still in memory (much like the on-disk compaction does after the + * data is flushed to disk). This policy is most useful for applications with high data churn or + * small working sets. */ EAGER, /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index c55086c7fbe7..1e0d772cb272 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -72,11 +72,12 @@ import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.util.PairOfSameType; -import org.apache.hbase.thirdparty.com.google.common.base.Throwables; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Throwables; + /** *

    * Read/write operations on hbase:meta region as well as assignment information stored @@ -154,10 +155,8 @@ public class MetaTableAccessor { @InterfaceAudience.Private public enum QueryType { - ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), - REGION(HConstants.CATALOG_FAMILY), - TABLE(HConstants.TABLE_FAMILY), - REPLICATION(HConstants.REPLICATION_BARRIER_FAMILY); + ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), REGION(HConstants.CATALOG_FAMILY), + TABLE(HConstants.TABLE_FAMILY), REPLICATION(HConstants.REPLICATION_BARRIER_FAMILY); private final byte[][] families; @@ -174,8 +173,8 @@ byte[][] getFamilies() { static final char META_REPLICA_ID_DELIMITER = '_'; /** A regex for parsing server columns from meta. See above javadoc for meta layout */ - private static final Pattern SERVER_COLUMN_PATTERN - = Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); + private static final Pattern SERVER_COLUMN_PATTERN = + Pattern.compile("^server(_[0-9a-fA-F]{4})?$"); //////////////////////// // Reading operations // @@ -257,12 +256,10 @@ private static Result get(final Table t, final Get g) throws IOException { * @deprecated use {@link #getRegionLocation(Connection, byte[])} instead */ @Deprecated - public static Pair getRegion(Connection connection, byte [] regionName) - throws IOException { + public static Pair getRegion(Connection connection, byte[] regionName) + throws IOException { HRegionLocation location = getRegionLocation(connection, regionName); - return location == null - ? null - : new Pair<>(location.getRegionInfo(), location.getServerName()); + return location == null ? null : new Pair<>(location.getRegionInfo(), location.getServerName()); } /** @@ -288,7 +285,7 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re Result r = get(getMetaHTable(connection), get); RegionLocations locations = getRegionLocations(r); return locations == null ? null - : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId()); + : locations.getRegionLocation(parsedInfo == null ? 0 : parsedInfo.getReplicaId()); } /** @@ -299,8 +296,8 @@ public static HRegionLocation getRegionLocation(Connection connection, byte[] re */ public static HRegionLocation getRegionLocation(Connection connection, RegionInfo regionInfo) throws IOException { - return getRegionLocation(getCatalogFamilyRow(connection, regionInfo), - regionInfo, regionInfo.getReplicaId()); + return getRegionLocation(getCatalogFamilyRow(connection, regionInfo), regionInfo, + regionInfo.getReplicaId()); } /** @@ -318,17 +315,17 @@ public static byte[] getMetaKeyForRegion(RegionInfo regionInfo) { return RegionReplicaUtil.getRegionInfoForDefaultReplica(regionInfo).getRegionName(); } - /** Returns an HRI parsed from this regionName. Not all the fields of the HRI - * is stored in the name, so the returned object should only be used for the fields - * in the regionName. + /** + * Returns an HRI parsed from this regionName. Not all the fields of the HRI is stored in the + * name, so the returned object should only be used for the fields in the regionName. */ // This should be moved to RegionInfo? TODO. public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws IOException { byte[][] fields = RegionInfo.parseRegionName(regionName); long regionId = Long.parseLong(Bytes.toString(fields[2])); int replicaId = fields.length > 3 ? 
Integer.parseInt(Bytes.toString(fields[3]), 16) : 0; - return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])) - .setStartKey(fields[1]).setRegionId(regionId).setReplicaId(replicaId).build(); + return RegionInfoBuilder.newBuilder(TableName.valueOf(fields[0])).setStartKey(fields[1]) + .setRegionId(regionId).setReplicaId(replicaId).build(); } /** @@ -337,26 +334,25 @@ public static RegionInfo parseRegionInfoFromRegionName(byte[] regionName) throws * @param regionName region we're looking for * @return result of the specified region */ - public static Result getRegionResult(Connection connection, - byte[] regionName) throws IOException { + public static Result getRegionResult(Connection connection, byte[] regionName) + throws IOException { Get get = new Get(regionName); get.addFamily(HConstants.CATALOG_FAMILY); return get(getMetaHTable(connection), get); } /** - * Scans META table for a row whose key contains the specified regionEncodedName, - * returning a single related Result instance if any row is found, null otherwise. - * + * Scans META table for a row whose key contains the specified regionEncodedName, returning + * a single related Result instance if any row is found, null otherwise. * @param connection the connection to query META table. * @param regionEncodedName the region encoded name to look for at META. * @return Result instance with the row related info in META, null otherwise. * @throws IOException if any errors occur while querying META. */ - public static Result scanByRegionEncodedName(Connection connection, - String regionEncodedName) throws IOException { - RowFilter rowFilter = new RowFilter(CompareOperator.EQUAL, - new SubstringComparator(regionEncodedName)); + public static Result scanByRegionEncodedName(Connection connection, String regionEncodedName) + throws IOException { + RowFilter rowFilter = + new RowFilter(CompareOperator.EQUAL, new SubstringComparator(regionEncodedName)); Scan scan = getMetaScan(connection.getConfiguration(), 1); scan.setFilter(rowFilter); try (Table table = getMetaHTable(connection); @@ -366,8 +362,8 @@ public static Result scanByRegionEncodedName(Connection connection, } /** - * @return Return all regioninfos listed in the 'info:merge*' columns of - * the regionName row. + * @return Return all regioninfos listed in the 'info:merge*' columns of the + * regionName row. */ @Nullable public static List getMergeRegions(Connection connection, byte[] regionName) @@ -387,12 +383,12 @@ public static boolean hasMergeRegions(Connection conn, byte[] regionName) throws * match the regex 'info:merge.*' in array of cells. */ @Nullable - public static Map getMergeRegionsWithName(Cell [] cells) { + public static Map getMergeRegionsWithName(Cell[] cells) { if (cells == null) { return null; } Map regionsToMerge = null; - for (Cell cell: cells) { + for (Cell cell : cells) { if (!isMergeQualifierPrefix(cell)) { continue; } @@ -410,21 +406,21 @@ public static Map getMergeRegionsWithName(Cell [] cells) { } /** - * @return Deserialized regioninfo values taken from column values that match - * the regex 'info:merge.*' in array of cells. + * @return Deserialized regioninfo values taken from column values that match the regex + * 'info:merge.*' in array of cells. */ @Nullable - public static List getMergeRegions(Cell [] cells) { + public static List getMergeRegions(Cell[] cells) { Map mergeRegionsWithName = getMergeRegionsWithName(cells); return (mergeRegionsWithName == null) ? 
null : new ArrayList<>(mergeRegionsWithName.values()); } /** - * @return True if any merge regions present in cells; i.e. - * the column in cell matches the regex 'info:merge.*'. + * @return True if any merge regions present in cells; i.e. the column in + * cell matches the regex 'info:merge.*'. */ - public static boolean hasMergeRegions(Cell [] cells) { - for (Cell cell: cells) { + public static boolean hasMergeRegions(Cell[] cells) { + for (Cell cell : cells) { if (!isMergeQualifierPrefix(cell)) { continue; } @@ -438,60 +434,57 @@ public static boolean hasMergeRegions(Cell [] cells) { */ private static boolean isMergeQualifierPrefix(Cell cell) { // Check to see if has family and that qualifier starts with the merge qualifier 'merge' - return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) && - PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); + return CellUtil.matchingFamily(cell, HConstants.CATALOG_FAMILY) + && PrivateCellUtil.qualifierStartsWith(cell, HConstants.MERGE_QUALIFIER_PREFIX); } /** * Lists all of the regions currently in META. - * * @param connection to connect with * @param excludeOfflinedSplitParents False if we are to include offlined/splitparents regions, - * true and we'll leave out offlined regions from returned list + * true and we'll leave out offlined regions from returned list * @return List of all user-space regions. */ public static List getAllRegions(Connection connection, - boolean excludeOfflinedSplitParents) - throws IOException { + boolean excludeOfflinedSplitParents) throws IOException { List> result; - result = getTableRegionsAndLocations(connection, null, - excludeOfflinedSplitParents); + result = getTableRegionsAndLocations(connection, null, excludeOfflinedSplitParents); return getListOfRegionInfos(result); } /** - * Gets all of the regions of the specified table. Do not use this method - * to get meta table regions, use methods in MetaTableLocator instead. + * Gets all of the regions of the specified table. Do not use this method to get meta table + * regions, use methods in MetaTableLocator instead. * @param connection connection we're using * @param tableName table we're looking for * @return Ordered list of {@link RegionInfo}. */ public static List getTableRegions(Connection connection, TableName tableName) - throws IOException { + throws IOException { return getTableRegions(connection, tableName, false); } /** - * Gets all of the regions of the specified table. Do not use this method - * to get meta table regions, use methods in MetaTableLocator instead. + * Gets all of the regions of the specified table. Do not use this method to get meta table + * regions, use methods in MetaTableLocator instead. * @param connection connection we're using * @param tableName table we're looking for - * @param excludeOfflinedSplitParents If true, do not include offlined split - * parents in the return. + * @param excludeOfflinedSplitParents If true, do not include offlined split parents in the + * return. * @return Ordered list of {@link RegionInfo}. 
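A small read-side sketch of the helpers documented above, assuming a default client Configuration and an existing table named "demo":

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

class MetaReadSketch {
  static void printRegions() throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      for (RegionInfo ri : MetaTableAccessor.getTableRegions(conn, TableName.valueOf("demo"))) {
        HRegionLocation loc = MetaTableAccessor.getRegionLocation(conn, ri);
        System.out.println(ri.getRegionNameAsString() + " -> "
            + (loc == null ? "unassigned" : loc.getServerName()));
      }
    }
  }
}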
*/ public static List getTableRegions(Connection connection, TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException { List> result = - getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents); + getTableRegionsAndLocations(connection, tableName, excludeOfflinedSplitParents); return getListOfRegionInfos(result); } - private static List getListOfRegionInfos( - final List> pairs) { + private static List + getListOfRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) { return Collections.emptyList(); } @@ -511,16 +504,16 @@ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type return null; } switch (type) { - case REGION: - byte[] startRow = new byte[tableName.getName().length + 2]; - System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); - startRow[startRow.length - 2] = HConstants.DELIMITER; - startRow[startRow.length - 1] = HConstants.DELIMITER; - return startRow; - case ALL: - case TABLE: - default: - return tableName.getName(); + case REGION: + byte[] startRow = new byte[tableName.getName().length + 2]; + System.arraycopy(tableName.getName(), 0, startRow, 0, tableName.getName().length); + startRow[startRow.length - 2] = HConstants.DELIMITER; + startRow[startRow.length - 1] = HConstants.DELIMITER; + return startRow; + case ALL: + case TABLE: + default: + return tableName.getName(); } } @@ -534,30 +527,28 @@ public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) } final byte[] stopRow; switch (type) { - case REGION: - stopRow = new byte[tableName.getName().length + 3]; - System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); - stopRow[stopRow.length - 3] = ' '; - stopRow[stopRow.length - 2] = HConstants.DELIMITER; - stopRow[stopRow.length - 1] = HConstants.DELIMITER; - break; - case ALL: - case TABLE: - default: - stopRow = new byte[tableName.getName().length + 1]; - System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); - stopRow[stopRow.length - 1] = ' '; - break; + case REGION: + stopRow = new byte[tableName.getName().length + 3]; + System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); + stopRow[stopRow.length - 3] = ' '; + stopRow[stopRow.length - 2] = HConstants.DELIMITER; + stopRow[stopRow.length - 1] = HConstants.DELIMITER; + break; + case ALL: + case TABLE: + default: + stopRow = new byte[tableName.getName().length + 1]; + System.arraycopy(tableName.getName(), 0, stopRow, 0, tableName.getName().length); + stopRow[stopRow.length - 1] = ' '; + break; } return stopRow; } /** - * This method creates a Scan object that will only scan catalog rows that - * belong to the specified table. It doesn't specify any columns. - * This is a better alternative to just using a start row and scan until - * it hits a new table since that requires parsing the HRI to get the table - * name. + * This method creates a Scan object that will only scan catalog rows that belong to the specified + * table. It doesn't specify any columns. This is a better alternative to just using a start row + * and scan until it hits a new table since that requires parsing the HRI to get the table name. * @param tableName bytes of table's name * @return configured Scan object */ @@ -595,8 +586,7 @@ private static Scan getMetaScan(Configuration conf, int rowUpperLimit) { * @return Return list of regioninfos and server. 
*/ public static List> - getTableRegionsAndLocations(Connection connection, TableName tableName) - throws IOException { + getTableRegionsAndLocations(Connection connection, TableName tableName) throws IOException { return getTableRegionsAndLocations(connection, tableName, true); } @@ -612,56 +602,52 @@ public static List> getTableRegionsAndLocations( Connection connection, @Nullable final TableName tableName, final boolean excludeOfflinedSplitParents) throws IOException { if (tableName != null && tableName.equals(TableName.META_TABLE_NAME)) { - throw new IOException("This method can't be used to locate meta regions;" - + " use MetaTableLocator instead"); + throw new IOException( + "This method can't be used to locate meta regions;" + " use MetaTableLocator instead"); } // Make a version of CollectingVisitor that collects RegionInfo and ServerAddress CollectingVisitor> visitor = - new CollectingVisitor>() { - private RegionLocations current = null; - - @Override - public boolean visit(Result r) throws IOException { - current = getRegionLocations(r); - if (current == null || current.getRegionLocation().getRegion() == null) { - LOG.warn("No serialized RegionInfo in " + r); - return true; + new CollectingVisitor>() { + private RegionLocations current = null; + + @Override + public boolean visit(Result r) throws IOException { + current = getRegionLocations(r); + if (current == null || current.getRegionLocation().getRegion() == null) { + LOG.warn("No serialized RegionInfo in " + r); + return true; + } + RegionInfo hri = current.getRegionLocation().getRegion(); + if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; + // Else call super and add this Result to the collection. + return super.visit(r); } - RegionInfo hri = current.getRegionLocation().getRegion(); - if (excludeOfflinedSplitParents && hri.isSplitParent()) return true; - // Else call super and add this Result to the collection. - return super.visit(r); - } - @Override - void add(Result r) { - if (current == null) { - return; - } - for (HRegionLocation loc : current.getRegionLocations()) { - if (loc != null) { - this.results.add(new Pair<>(loc.getRegion(), loc.getServerName())); + @Override + void add(Result r) { + if (current == null) { + return; + } + for (HRegionLocation loc : current.getRegionLocations()) { + if (loc != null) { + this.results.add(new Pair<>(loc.getRegion(), loc.getServerName())); + } } } - } - }; - scanMeta(connection, - getTableStartRowForMeta(tableName, QueryType.REGION), - getTableStopRowForMeta(tableName, QueryType.REGION), - QueryType.REGION, visitor); + }; + scanMeta(connection, getTableStartRowForMeta(tableName, QueryType.REGION), + getTableStopRowForMeta(tableName, QueryType.REGION), QueryType.REGION, visitor); return visitor.getResults(); } /** * @param connection connection we're using * @param serverName server whose regions we're interested in - * @return List of user regions installed on this server (does not include - * catalog regions). + * @return List of user regions installed on this server (does not include catalog regions). * @throws IOException */ - public static NavigableMap - getServerUserRegions(Connection connection, final ServerName serverName) - throws IOException { + public static NavigableMap getServerUserRegions(Connection connection, + final ServerName serverName) throws IOException { final NavigableMap hris = new TreeMap<>(); // Fill the above hris map with entries from hbase:meta that have the passed // servername. 
@@ -684,10 +670,9 @@ void add(Result r) { return hris; } - public static void fullScanMetaAndPrint(Connection connection) - throws IOException { + public static void fullScanMetaAndPrint(Connection connection) throws IOException { Visitor v = r -> { - if (r == null || r.isEmpty()) { + if (r == null || r.isEmpty()) { return true; } LOG.info("fullScanMetaAndPrint.Current Meta Row: " + r); @@ -729,10 +714,10 @@ private static void scanMeta(Connection connection, @Nullable final byte[] start /** * Performs a scan of META table for given table starting from given row. * @param connection connection we're using - * @param visitor visitor to call - * @param tableName table withing we scan - * @param row start scan from this row - * @param rowLimit max number of rows to return + * @param visitor visitor to call + * @param tableName table withing we scan + * @param row start scan from this row + * @param rowLimit max number of rows to return */ public static void scanMeta(Connection connection, final Visitor visitor, final TableName tableName, final byte[] row, final int rowLimit) throws IOException { @@ -742,8 +727,8 @@ public static void scanMeta(Connection connection, final Visitor visitor, startRow = getTableStartRowForMeta(tableName, QueryType.REGION); if (row != null) { RegionInfo closestRi = getClosestRegionInfo(connection, tableName, row); - startRow = - RegionInfo.createRegionName(tableName, closestRi.getStartKey(), HConstants.ZEROES, false); + startRow = RegionInfo.createRegionName(tableName, closestRi.getStartKey(), + HConstants.ZEROES, false); } stopRow = getTableStopRowForMeta(tableName, QueryType.REGION); } @@ -753,16 +738,14 @@ public static void scanMeta(Connection connection, final Visitor visitor, /** * Performs a scan of META table. * @param connection connection we're using - * @param startRow Where to start the scan. Pass null if want to begin scan - * at first row. - * @param stopRow Where to stop the scan. Pass null if want to scan all rows - * from the start one + * @param startRow Where to start the scan. Pass null if want to begin scan at first row. + * @param stopRow Where to stop the scan. Pass null if want to scan all rows from the start one * @param type scanned part of meta * @param maxRows maximum rows to return * @param visitor Visitor invoked against each row. 
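A sketch of the Visitor-based scan described above, using the public scanMeta(Connection, Visitor, TableName, byte[], int) overload from this hunk to count the catalog rows of one (made-up) table:

import java.io.IOException;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;

class MetaVisitorSketch {
  static int countCatalogRows(Connection conn) throws IOException {
    final int[] rows = {0};
    MetaTableAccessor.Visitor counter = r -> {
      rows[0]++;
      return true; // keep scanning
    };
    // A null row means "start at the first region of the table".
    MetaTableAccessor.scanMeta(conn, counter, TableName.valueOf("demo"), null, Integer.MAX_VALUE);
    return rows[0];
  }
}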
*/ static void scanMeta(Connection connection, @Nullable final byte[] startRow, - @Nullable final byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) + @Nullable final byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) throws IOException { scanMeta(connection, startRow, stopRow, type, null, maxRows, visitor); } @@ -787,9 +770,9 @@ private static void scanMeta(Connection connection, @Nullable final byte[] start } if (LOG.isTraceEnabled()) { - LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + - " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + - " with caching=" + scan.getCaching()); + LOG.trace("Scanning META" + " starting at row=" + Bytes.toStringBinary(startRow) + + " stopping at row=" + Bytes.toStringBinary(stopRow) + " for max=" + rowUpperLimit + + " with caching=" + scan.getCaching()); } int currentRow = 0; @@ -827,13 +810,13 @@ private static RegionInfo getClosestRegionInfo(Connection connection, try (ResultScanner resultScanner = getMetaHTable(connection).getScanner(scan)) { Result result = resultScanner.next(); if (result == null) { - throw new TableNotFoundException("Cannot find row in META " + - " for table: " + tableName + ", row=" + Bytes.toStringBinary(row)); + throw new TableNotFoundException("Cannot find row in META " + " for table: " + tableName + + ", row=" + Bytes.toStringBinary(row)); } RegionInfo regionInfo = getRegionInfo(result); if (regionInfo == null) { - throw new IOException("RegionInfo was null or empty in Meta for " + - tableName + ", row=" + Bytes.toStringBinary(row)); + throw new IOException("RegionInfo was null or empty in Meta for " + tableName + ", row=" + + Bytes.toStringBinary(row)); } return regionInfo; } @@ -907,10 +890,9 @@ public static byte[] getServerNameColumn(int replicaId) { * @return a byte[] for server column qualifier */ public static byte[] getServerColumn(int replicaId) { - return replicaId == 0 - ? HConstants.SERVER_QUALIFIER - : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.SERVER_QUALIFIER + : Bytes.toBytes(HConstants.SERVER_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -919,10 +901,9 @@ public static byte[] getServerColumn(int replicaId) { * @return a byte[] for server start code column qualifier */ public static byte[] getStartCodeColumn(int replicaId) { - return replicaId == 0 - ? HConstants.STARTCODE_QUALIFIER - : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? HConstants.STARTCODE_QUALIFIER + : Bytes.toBytes(HConstants.STARTCODE_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** @@ -931,15 +912,14 @@ public static byte[] getStartCodeColumn(int replicaId) { * @return a byte[] for seqNum column qualifier */ public static byte[] getSeqNumColumn(int replicaId) { - return replicaId == 0 - ? HConstants.SEQNUM_QUALIFIER - : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER - + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); + return replicaId == 0 ? 
HConstants.SEQNUM_QUALIFIER + : Bytes.toBytes(HConstants.SEQNUM_QUALIFIER_STR + META_REPLICA_ID_DELIMITER + + String.format(RegionInfo.REPLICA_ID_FORMAT, replicaId)); } /** - * Parses the replicaId from the server column qualifier. See top of the class javadoc - * for the actual meta layout + * Parses the replicaId from the server column qualifier. See top of the class javadoc for the + * actual meta layout * @param serverColumn the column qualifier * @return an int for the replicaId */ @@ -969,14 +949,14 @@ public static ServerName getServerName(final Result r, final int replicaId) { byte[] serverColumn = getServerColumn(replicaId); Cell cell = r.getColumnLatestCell(getCatalogFamily(), serverColumn); if (cell == null || cell.getValueLength() == 0) return null; - String hostAndPort = Bytes.toString( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); + String hostAndPort = + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()); byte[] startcodeColumn = getStartCodeColumn(replicaId); cell = r.getColumnLatestCell(getCatalogFamily(), startcodeColumn); if (cell == null || cell.getValueLength() == 0) return null; try { return ServerName.valueOf(hostAndPort, - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } catch (IllegalArgumentException e) { LOG.error("Ignoring invalid region for server " + hostAndPort + "; cell=" + cell, e); return null; @@ -987,15 +967,14 @@ public static ServerName getServerName(final Result r, final int replicaId) { * Returns the {@link ServerName} from catalog table {@link Result} where the region is * transitioning on. It should be the same as {@link MetaTableAccessor#getServerName(Result,int)} * if the server is at OPEN state. - * * @param r Result to pull the transitioning server name from - * @return A ServerName instance or {@link MetaTableAccessor#getServerName(Result,int)} - * if necessary fields not found or empty. + * @return A ServerName instance or {@link MetaTableAccessor#getServerName(Result,int)} if + * necessary fields not found or empty. */ @Nullable public static ServerName getTargetServerName(final Result r, final int replicaId) { - final Cell cell = r.getColumnLatestCell(HConstants.CATALOG_FAMILY, - getServerNameColumn(replicaId)); + final Cell cell = + r.getColumnLatestCell(HConstants.CATALOG_FAMILY, getServerNameColumn(replicaId)); if (cell == null || cell.getValueLength() == 0) { RegionLocations locations = MetaTableAccessor.getRegionLocations(r); if (locations != null) { @@ -1006,13 +985,13 @@ public static ServerName getTargetServerName(final Result r, final int replicaId } return null; } - return ServerName.parseServerName(Bytes.toString(cell.getValueArray(), cell.getValueOffset(), - cell.getValueLength())); + return ServerName.parseServerName( + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } /** - * The latest seqnum that the server writing to meta observed when opening the region. - * E.g. the seqNum when the result of {@link #getServerName(Result, int)} was written. + * The latest seqnum that the server writing to meta observed when opening the region. E.g. the + * seqNum when the result of {@link #getServerName(Result, int)} was written. * @param r Result to pull the seqNum from * @return SeqNum, or HConstants.NO_SEQNUM if there's no value written. 
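For the replica-aware qualifiers described above, the helpers resolve roughly as sketched here (replica 0 keeps the bare qualifier; higher replicas get a four-hex-digit suffix, shown for replica 1):

import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.util.Bytes;

class ReplicaColumnSketch {
  static void show() {
    System.out.println(Bytes.toString(MetaTableAccessor.getServerColumn(0))); // "server"
    System.out.println(Bytes.toString(MetaTableAccessor.getServerColumn(1))); // "server_0001"
  }
}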
*/ @@ -1023,8 +1002,7 @@ private static long getSeqNumDuringOpen(final Result r, final int replicaId) { } /** - * Returns the daughter regions by reading the corresponding columns of the catalog table - * Result. + * Returns the daughter regions by reading the corresponding columns of the catalog table Result. * @param data a Result object from the catalog table scan * @return pair of RegionInfo or PairOfSameType(null, null) if region is not a split parent */ @@ -1036,8 +1014,8 @@ public static PairOfSameType getDaughterRegions(Result data) { /** * Returns an HRegionLocationList extracted from the result. - * @return an HRegionLocationList containing all locations for the region range or null if - * we can't deserialize the result. + * @return an HRegionLocationList containing all locations for the region range or null if we + * can't deserialize the result. */ @Nullable public static RegionLocations getRegionLocations(final Result r) { @@ -1046,7 +1024,7 @@ public static RegionLocations getRegionLocations(final Result r) { if (regionInfo == null) return null; List locations = new ArrayList<>(1); - NavigableMap> familyMap = r.getNoVersionMap(); + NavigableMap> familyMap = r.getNoVersionMap(); locations.add(getRegionLocation(r, regionInfo, 0)); @@ -1080,16 +1058,15 @@ public static RegionLocations getRegionLocations(final Result r) { } /** - * Returns the HRegionLocation parsed from the given meta row Result - * for the given regionInfo and replicaId. The regionInfo can be the default region info - * for the replica. + * Returns the HRegionLocation parsed from the given meta row Result for the given regionInfo and + * replicaId. The regionInfo can be the default region info for the replica. * @param r the meta row result * @param regionInfo RegionInfo for default replica * @param replicaId the replicaId for the HRegionLocation * @return HRegionLocation parsed from the given meta row Result for the given replicaId */ private static HRegionLocation getRegionLocation(final Result r, final RegionInfo regionInfo, - final int replicaId) { + final int replicaId) { ServerName serverName = getServerName(r, replicaId); long seqNum = getSeqNumDuringOpen(r, replicaId); RegionInfo replicaInfo = RegionReplicaUtil.getRegionInfoForReplica(regionInfo, replicaId); @@ -1098,8 +1075,7 @@ private static HRegionLocation getRegionLocation(final Result r, final RegionInf /** * Returns RegionInfo object from the column - * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog - * table Result. + * HConstants.CATALOG_FAMILY:HConstants.REGIONINFO_QUALIFIER of the catalog table Result. * @param data a Result object from the catalog table scan * @return RegionInfo or null */ @@ -1115,11 +1091,11 @@ public static RegionInfo getRegionInfo(Result data) { * @return An RegionInfo instance or null. 
*/ @Nullable - public static RegionInfo getRegionInfo(final Result r, byte [] qualifier) { + public static RegionInfo getRegionInfo(final Result r, byte[] qualifier) { Cell cell = r.getColumnLatestCell(getCatalogFamily(), qualifier); if (cell == null) return null; - return RegionInfo.parseFromOrNull(cell.getValueArray(), - cell.getValueOffset(), cell.getValueLength()); + return RegionInfo.parseFromOrNull(cell.getValueArray(), cell.getValueOffset(), + cell.getValueLength()); } /** @@ -1128,8 +1104,7 @@ public static RegionInfo getRegionInfo(final Result r, byte [] qualifier) { * @param tableName table to fetch state for */ @Nullable - public static TableState getTableState(Connection conn, TableName tableName) - throws IOException { + public static TableState getTableState(Connection conn, TableName tableName) throws IOException { if (tableName.equals(TableName.META_TABLE_NAME)) { return new TableState(tableName, TableState.State.ENABLED); } @@ -1144,8 +1119,7 @@ public static TableState getTableState(Connection conn, TableName tableName) * @param conn connection to use * @return map {tableName -> state} */ - public static Map getTableStates(Connection conn) - throws IOException { + public static Map getTableStates(Connection conn) throws IOException { final Map states = new LinkedHashMap<>(); Visitor collector = r -> { TableState state = getTableState(r); @@ -1159,19 +1133,17 @@ public static Map getTableStates(Connection conn) } /** - * Updates state in META - * Do not use. For internal use only. + * Updates state in META Do not use. For internal use only. * @param conn connection to use * @param tableName table to look for */ - public static void updateTableState(Connection conn, TableName tableName, - TableState.State actual) throws IOException { + public static void updateTableState(Connection conn, TableName tableName, TableState.State actual) + throws IOException { updateTableState(conn, new TableState(tableName, actual)); } /** - * Decode table state from META Result. - * Should contain cell from HConstants.TABLE_FAMILY + * Decode table state from META Result. Should contain cell from HConstants.TABLE_FAMILY * @return null if not found */ @Nullable @@ -1196,8 +1168,7 @@ public interface Visitor { /** * Visit the catalog table row. * @param r A row from catalog table - * @return True if we are to proceed scanning the table, else false if - * we are to stop now. + * @return True if we are to proceed scanning the table, else false if we are to stop now. */ boolean visit(final Result r) throws IOException; } @@ -1213,6 +1184,7 @@ public interface CloseableVisitor extends Visitor, Closeable { */ static abstract class CollectingVisitor implements Visitor { final List results = new ArrayList<>(); + @Override public boolean visit(Result r) throws IOException { if (r != null && !r.isEmpty()) { @@ -1224,8 +1196,7 @@ public boolean visit(Result r) throws IOException { abstract void add(Result r); /** - * @return Collected results; wait till visits complete to collect all - * possible results + * @return Collected results; wait till visits complete to collect all possible results */ List getResults() { return this.results; @@ -1260,7 +1231,7 @@ public boolean visit(Result rowResult) throws IOException { return true; } - //skip over offline and split regions + // skip over offline and split regions if (!(info.isOffline() || info.isSplit())) { return visitInternal(rowResult); } @@ -1269,10 +1240,10 @@ public boolean visit(Result rowResult) throws IOException { } /** - * A Visitor for a table. 
Provides a consistent view of the table's - * hbase:meta entries during concurrent splits (see HBASE-5986 for details). This class - * does not guarantee ordered traversal of meta entries, and can block until the - * hbase:meta entries for daughters are available during splits. + * A Visitor for a table. Provides a consistent view of the table's hbase:meta entries during + * concurrent splits (see HBASE-5986 for details). This class does not guarantee ordered traversal + * of meta entries, and can block until the hbase:meta entries for daughters are available during + * splits. */ public static abstract class TableVisitorBase extends DefaultVisitorBase { private TableName tableName; @@ -1323,24 +1294,16 @@ public static Delete makeDeleteFromRegionInfo(RegionInfo regionInfo, long ts) { private static Put addDaughtersToPut(Put put, RegionInfo splitA, RegionInfo splitB) throws IOException { if (splitA != null) { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(HConstants.SPLITA_QUALIFIER) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(RegionInfo.toByteArray(splitA)) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(RegionInfo.toByteArray(splitA)).build()); } if (splitB != null) { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(HConstants.SPLITB_QUALIFIER) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(RegionInfo.toByteArray(splitB)) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(RegionInfo.toByteArray(splitB)).build()); } return put; } @@ -1401,7 +1364,7 @@ private static void deleteFromMetaTable(final Connection connection, final Delet /** * Delete the passed deletes from the hbase:meta table. * @param connection connection we're using - * @param deletes Deletes to add to hbase:meta This list should support #remove. + * @param deletes Deletes to add to hbase:meta This list should support #remove. 
*/ private static void deleteFromMetaTable(final Connection connection, final List deletes) throws IOException { @@ -1412,14 +1375,10 @@ private static void deleteFromMetaTable(final Connection connection, final List< } private static Put addRegionStateToPut(Put put, RegionState.State state) throws IOException { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getRegionStateColumn()) - .setTimestamp(put.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(state.name())) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getRegionStateColumn()) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(state.name())).build()); return put; } @@ -1430,15 +1389,15 @@ public static void updateRegionState(Connection connection, RegionInfo ri, RegionState.State state) throws IOException { Put put = new Put(RegionReplicaUtil.getRegionInfoForDefaultReplica(ri).getRegionName()); MetaTableAccessor.putsToMetaTable(connection, - Collections.singletonList(addRegionStateToPut(put, state))); + Collections.singletonList(addRegionStateToPut(put, state))); } /** * Adds daughter region infos to hbase:meta row for the specified region. Note that this does not * add its daughter's as different rows, but adds information about the daughters in the same row * as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} - * if you want to do that. + * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if + * you want to do that. * @param connection connection we're using * @param regionInfo RegionInfo of parent region * @param splitA first split daughter of the parent regionInfo @@ -1458,10 +1417,10 @@ public static void addSplitsToParent(Connection connection, RegionInfo regionInf /** * Adds a (single) hbase:meta row for the specified new region and its daughters. Note that this - * does not add its daughter's as different rows, but adds information about the daughters - * in the same row as the parent. Use - * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} - * if you want to do that. + * does not add its daughter's as different rows, but adds information about the daughters in the + * same row as the parent. Use + * {@link #splitRegion(Connection, RegionInfo, long, RegionInfo, RegionInfo, ServerName, int)} if + * you want to do that. * @param connection connection we're using * @param regionInfo region information * @throws IOException if problem connecting or updating meta @@ -1472,8 +1431,8 @@ public static void addRegionToMeta(Connection connection, RegionInfo regionInfo) } /** - * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions - * is CLOSED. + * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions is + * CLOSED. * @param connection connection we're using * @param regionInfos region information list * @throws IOException if problem connecting or updating meta @@ -1485,8 +1444,8 @@ public static void addRegionsToMeta(Connection connection, List regi } /** - * Adds a hbase:meta row for each of the specified new regions. Initial state for new regions - * is CLOSED. + * Adds a hbase:meta row for each of the specified new regions. 
Initial state for new regions is + * CLOSED. * @param connection connection we're using * @param regionInfos region information list * @param ts desired timestamp @@ -1517,41 +1476,36 @@ static Put addMergeRegions(Put put, Collection mergeRegions) throws int max = mergeRegions.size(); if (max > limit) { // Should never happen!!!!! But just in case. - throw new RuntimeException("Can't merge " + max + " regions in one go; " + limit + - " is upper-limit."); + throw new RuntimeException( + "Can't merge " + max + " regions in one go; " + limit + " is upper-limit."); } int counter = 0; - for (RegionInfo ri: mergeRegions) { + for (RegionInfo ri : mergeRegions) { String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY). - setRow(put.getRow()). - setFamily(HConstants.CATALOG_FAMILY). - setQualifier(Bytes.toBytes(qualifier)). - setTimestamp(put.getTimestamp()). - setType(Type.Put). - setValue(RegionInfo.toByteArray(ri)). - build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(Bytes.toBytes(qualifier)) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(ri)) + .build()); } return put; } /** - * Merge regions into one in an atomic operation. Deletes the merging regions in - * hbase:meta and adds the merged region. + * Merge regions into one in an atomic operation. Deletes the merging regions in hbase:meta and + * adds the merged region. * @param connection connection we're using * @param mergedRegion the merged region - * @param parentSeqNum Parent regions to merge and their next open sequence id used - * by serial replication. Set to -1 if not needed by this table. + * @param parentSeqNum Parent regions to merge and their next open sequence id used by serial + * replication. Set to -1 if not needed by this table. 
* @param sn the location of the region */ public static void mergeRegions(Connection connection, RegionInfo mergedRegion, - Map parentSeqNum, ServerName sn, int regionReplication) - throws IOException { + Map parentSeqNum, ServerName sn, int regionReplication) throws IOException { try (Table meta = getMetaHTable(connection)) { long time = HConstants.LATEST_TIMESTAMP; List mutations = new ArrayList<>(); List replicationParents = new ArrayList<>(); - for (Map.Entry e: parentSeqNum.entrySet()) { + for (Map.Entry e : parentSeqNum.entrySet()) { RegionInfo ri = e.getKey(); long seqNum = e.getValue(); // Deletes for merging regions @@ -1611,9 +1565,8 @@ public static void splitRegion(Connection connection, RegionInfo parent, long pa try (Table meta = getMetaHTable(connection)) { long time = EnvironmentEdgeManager.currentTime(); // Put for parent - Put putParent = makePutFromRegionInfo(RegionInfoBuilder.newBuilder(parent) - .setOffline(true) - .setSplit(true).build(), time); + Put putParent = makePutFromRegionInfo( + RegionInfoBuilder.newBuilder(parent).setOffline(true).setSplit(true).build(), time); addDaughtersToPut(putParent, splitA, splitB); // Puts for daughters @@ -1673,8 +1626,7 @@ public static Put makePutFromTableState(TableState state, long ts) { * @param connection to use for deletion * @param table to delete state for */ - public static void deleteTableState(Connection connection, TableName table) - throws IOException { + public static void deleteTableState(Connection connection, TableName table) throws IOException { long time = EnvironmentEdgeManager.currentTime(); Delete delete = new Delete(table.getName()); delete.addColumns(getTableFamily(), getTableStateColumn(), time); @@ -1682,14 +1634,14 @@ public static void deleteTableState(Connection connection, TableName table) LOG.info("Deleted table " + table + " state from META"); } - private static void multiMutate(Table table, byte[] row, - Mutation... mutations) throws IOException { + private static void multiMutate(Table table, byte[] row, Mutation... mutations) + throws IOException { multiMutate(table, row, Arrays.asList(mutations)); } /** - * Performs an atomic multi-mutate operation against the given table. Used by the likes of - * merge and split as these want to make atomic mutations across multiple rows. + * Performs an atomic multi-mutate operation against the given table. Used by the likes of merge + * and split as these want to make atomic mutations across multiple rows. * @throws IOException even if we encounter a RuntimeException, we'll still wrap it in an IOE. 
*/ static void multiMutate(final Table table, byte[] row, final List mutations) @@ -1706,12 +1658,12 @@ static void multiMutate(final Table table, byte[] row, final List muta ProtobufUtil.toMutation(ClientProtos.MutationProto.MutationType.DELETE, mutation)); } else { throw new DoNotRetryIOException( - "multi in MetaEditor doesn't support " + mutation.getClass().getName()); + "multi in MetaEditor doesn't support " + mutation.getClass().getName()); } } ServerRpcController controller = new ServerRpcController(); CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.mutateRows(controller, builder.build(), rpcCallback); MutateRowsResponse resp = rpcCallback.get(); if (controller.failedOnException()) { @@ -1801,8 +1753,7 @@ public static void deleteRegionInfos(Connection connection, List reg * @param regionsInfo list of regions to be deleted from META */ private static void deleteRegionInfos(Connection connection, List regionsInfo, - long ts) - throws IOException { + long ts) throws IOException { List deletes = new ArrayList<>(regionsInfo.size()); for (RegionInfo hri : regionsInfo) { Delete e = new Delete(hri.getRegionName()); @@ -1863,25 +1814,21 @@ public static void deleteMergeQualifiers(Connection connection, final RegionInfo // the previous GCMultipleMergedRegionsProcedure is still going on, in this case, the second // GCMultipleMergedRegionsProcedure could delete the merged region by accident! if (qualifiers.isEmpty()) { - LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString() + - " in meta table, they are cleaned up already, Skip."); + LOG.info("No merged qualifiers for region " + mergeRegion.getRegionNameAsString() + + " in meta table, they are cleaned up already, Skip."); return; } deleteFromMetaTable(connection, delete); - LOG.info("Deleted merge references in " + mergeRegion.getRegionNameAsString() + - ", deleted qualifiers " + qualifiers.stream().map(Bytes::toStringBinary). - collect(Collectors.joining(", "))); - } - - public static Put addRegionInfo(final Put p, final RegionInfo hri) - throws IOException { - p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(HConstants.REGIONINFO_QUALIFIER) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) + LOG.info( + "Deleted merge references in " + mergeRegion.getRegionNameAsString() + ", deleted qualifiers " + + qualifiers.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", "))); + } + + public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException { + p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) + .setFamily(getCatalogFamily()).setQualifier(HConstants.REGIONINFO_QUALIFIER) + .setTimestamp(p.getTimestamp()).setType(Type.Put) // Serialize the Default Replica HRI otherwise scan of hbase:meta // shows an info:regioninfo value with encoded name and region // name that differs from that of the hbase;meta row. 
@@ -1893,30 +1840,16 @@ public static Put addRegionInfo(final Put p, final RegionInfo hri) public static Put addLocation(Put p, ServerName sn, long openSeqNum, int replicaId) throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - return p.add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getServerColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getAddress().toString())) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getStartCodeColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(sn.getStartcode())) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)) - .build()); + return p + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getAddress().toString())).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getStartcode())).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)).build()); } private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) { @@ -1964,8 +1897,8 @@ private static List parseParentsBytes(byte[] bytes) { private static void addReplicationParent(Put put, List parents) throws IOException { byte[] value = getParentsBytes(parents); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) - .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build()); + .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build()); } public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts) @@ -1979,39 +1912,24 @@ public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openS * See class comment on SerialReplicationChecker */ public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException { - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(HConstants.REPLICATION_BARRIER_FAMILY) - .setQualifier(HConstants.SEQNUM_QUALIFIER) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER) + .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .build()); } public static Put addEmptyLocation(Put p, int replicaId) throws IOException { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - return p.add(builder.clear() - .setRow(p.getRow()) - 
.setFamily(getCatalogFamily()) - .setQualifier(getServerColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getStartCodeColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .build()) - .add(builder.clear() - .setRow(p.getRow()) - .setFamily(getCatalogFamily()) - .setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Cell.Type.Put) - .build()); + return p + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Type.Put).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).build()) + .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) + .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).build()); } public static final class ReplicationBarrierResult { @@ -2039,10 +1957,10 @@ public List getParentRegionNames() { @Override public String toString() { - return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + - state + ", parentRegionNames=" + - parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + - "]"; + return "ReplicationBarrierResult [barriers=" + Arrays.toString(barriers) + ", state=" + state + + ", parentRegionNames=" + + parentRegionNames.stream().map(Bytes::toStringBinary).collect(Collectors.joining(", ")) + + "]"; } } @@ -2052,18 +1970,19 @@ private static long getReplicationBarrier(Cell c) { public static long[] getReplicationBarriers(Result result) { return result.getColumnCells(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) - .stream().mapToLong(MetaTableAccessor::getReplicationBarrier).sorted().distinct().toArray(); + .stream().mapToLong(MetaTableAccessor::getReplicationBarrier).sorted().distinct().toArray(); } private static ReplicationBarrierResult getReplicationBarrierResult(Result result) { long[] barriers = getReplicationBarriers(result); byte[] stateBytes = result.getValue(getCatalogFamily(), getRegionStateColumn()); RegionState.State state = - stateBytes != null ? RegionState.State.valueOf(Bytes.toString(stateBytes)) : null; + stateBytes != null ? RegionState.State.valueOf(Bytes.toString(stateBytes)) : null; byte[] parentRegionsBytes = - result.getValue(HConstants.REPLICATION_BARRIER_FAMILY, REPLICATION_PARENT_QUALIFIER); + result.getValue(HConstants.REPLICATION_BARRIER_FAMILY, REPLICATION_PARENT_QUALIFIER); List parentRegionNames = - parentRegionsBytes != null ? parseParentsBytes(parentRegionsBytes) : Collections.emptyList(); + parentRegionsBytes != null ? 
parseParentsBytes(parentRegionsBytes) + : Collections.emptyList(); return new ReplicationBarrierResult(barriers, state, parentRegionNames); } @@ -2071,11 +1990,11 @@ public static ReplicationBarrierResult getReplicationBarrierResult(Connection co TableName tableName, byte[] row, byte[] encodedRegionName) throws IOException { byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); byte[] metaStopKey = - RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); + RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey) - .addColumn(getCatalogFamily(), getRegionStateColumn()) - .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true) - .setCaching(10); + .addColumn(getCatalogFamily(), getRegionStateColumn()) + .addFamily(HConstants.REPLICATION_BARRIER_FAMILY).readAllVersions().setReversed(true) + .setCaching(10); try (Table table = getMetaHTable(conn); ResultScanner scanner = table.getScanner(scan)) { for (Result result;;) { result = scanner.next(); @@ -2099,8 +2018,8 @@ public static long[] getReplicationBarrier(Connection conn, byte[] regionName) throws IOException { try (Table table = getMetaHTable(conn)) { Result result = table.get(new Get(regionName) - .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) - .readAllVersions()); + .addColumn(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER) + .readAllVersions()); return getReplicationBarriers(result); } } @@ -2111,7 +2030,7 @@ public static List> getTableEncodedRegionNameAndLastBarrier(C scanMeta(conn, getTableStartRowForMeta(tableName, QueryType.REPLICATION), getTableStopRowForMeta(tableName, QueryType.REPLICATION), QueryType.REPLICATION, r -> { byte[] value = - r.getValue(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER); + r.getValue(HConstants.REPLICATION_BARRIER_FAMILY, HConstants.SEQNUM_QUALIFIER); if (value == null) { return true; } @@ -2151,13 +2070,9 @@ private static void debugLogMutation(Mutation p) throws IOException { } private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException { - return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(p.getRow()) - .setFamily(HConstants.CATALOG_FAMILY) - .setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()) - .setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)) - .build()); + return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) + .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getSeqNumColumn(replicaId)) + .setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .build()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java index 3e06f4250af6..a49575849b04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MultiActionResultTooLarge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; /** - * Exception thrown when the result needs to be chunked on the server side. - * It signals that retries should happen right away and not count against the number of - * retries because some of the multi was a success. + * Exception thrown when the result needs to be chunked on the server side. It signals that retries + * should happen right away and not count against the number of retries because some of the multi + * was a success. */ @InterfaceAudience.Public public class MultiActionResultTooLarge extends RetryImmediatelyException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java index 5263523417ed..83e29fd9edc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceExistException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java index 72ff1e61b849..0af01d23bddf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NamespaceNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java index c51fccb5955d..8397d8857630 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotAllMetaRegionsOnlineException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; @@ -27,6 +25,7 @@ @InterfaceAudience.Public public class NotAllMetaRegionsOnlineException extends DoNotRetryIOException { private static final long serialVersionUID = 6439786157874827523L; + /** * default constructor */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java index 918408778c0d..aa138478b4ab 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java index e887928da828..473947b8f769 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseHoldException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,10 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This exception is thrown by the master when a region server was shut down and - * restarted so fast that the master still hasn't processed the server shutdown - * of the first instance, or when master is initializing and client call admin - * operations, or when an operation is performed on a region server that is still starting. + * This exception is thrown by the master when a region server was shut down and restarted so fast + * that the master still hasn't processed the server shutdown of the first instance, or when master + * is initializing and client call admin operations, or when an operation is performed on a region + * server that is still starting. */ @SuppressWarnings("serial") @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java index 62f84e9495be..5e60e44243a0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/PleaseRestartMasterException.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
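The PleaseHoldException Javadoc rewritten above describes a transient condition: the master is initializing, or still processing a server shutdown, or the target region server is still starting. A minimal client-side sketch of how a caller could honor that contract follows; it is not part of this patch, the table name and backoff numbers are made up, and in practice the standard HBase client already retries these calls internally, so this only illustrates the exception's meaning.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PleaseHoldException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class PleaseHoldRetrySketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
      createWithRetry(admin, TableName.valueOf("demo")); // hypothetical table
    }
  }

  // Keep retrying while the master answers with PleaseHoldException, backing off
  // between attempts; give up after a bounded number of tries.
  static void createWithRetry(Admin admin, TableName table)
    throws IOException, InterruptedException {
    long backoffMs = 1000;
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        admin.createTable(TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("f")).build());
        return;
      } catch (PleaseHoldException e) {
        Thread.sleep(backoffMs);
        backoffMs = Math.min(backoffMs * 2, 30_000);
      }
    }
    throw new IOException("master kept asking us to hold; giving up");
  }
}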
*/ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java index 406c41ee52c1..346677e03ea8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RSGroupTableAccessor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.client.Result; @@ -34,12 +32,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Read rs group information from hbase:rsgroup. + * Read rs group information from hbase:rsgroup. */ @InterfaceAudience.Private public final class RSGroupTableAccessor { - //Assigned before user tables + // Assigned before user tables private static final TableName RSGROUP_TABLE_NAME = TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "rsgroup"); private static final byte[] META_FAMILY_BYTES = Bytes.toBytes("m"); @@ -52,8 +50,7 @@ public static boolean isRSGroupsEnabled(Connection connection) throws IOExceptio return connection.getAdmin().tableExists(RSGROUP_TABLE_NAME); } - public static List getAllRSGroupInfo(Connection connection) - throws IOException { + public static List getAllRSGroupInfo(Connection connection) throws IOException { try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)) { List rsGroupInfos = new ArrayList<>(); for (Result result : rsGroupTable.getScanner(new Scan())) { @@ -71,14 +68,13 @@ private static RSGroupInfo getRSGroupInfo(Result result) throws IOException { if (rsGroupInfo == null) { return null; } - RSGroupProtos.RSGroupInfo proto = - RSGroupProtos.RSGroupInfo.parseFrom(rsGroupInfo); + RSGroupProtos.RSGroupInfo proto = RSGroupProtos.RSGroupInfo.parseFrom(rsGroupInfo); return ProtobufUtil.toGroupInfo(proto); } public static RSGroupInfo getRSGroupInfo(Connection connection, byte[] rsGroupName) throws IOException { - try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)){ + try (Table rsGroupTable = connection.getTable(RSGROUP_TABLE_NAME)) { Result result = rsGroupTable.get(new Get(rsGroupName)); return getRSGroupInfo(result); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java index 8a8d2151aa2e..aff9ff8af472 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +20,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown when something happens related to region handling. - * Subclasses have to be more specific. + * Thrown when something happens related to region handling. Subclasses have to be more specific. 
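For context on the RSGroupTableAccessor hunks above: the class exposes static readers over the hbase:rsgroup system table. A short usage sketch follows; it is not part of this patch, and since the class is annotated @InterfaceAudience.Private it is shown only to illustrate the methods being reformatted, not as recommended public API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RSGroupTableAccessor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.rsgroup.RSGroupInfo;

public final class RSGroupDumpSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // isRSGroupsEnabled() simply checks whether the hbase:rsgroup table exists.
      if (!RSGroupTableAccessor.isRSGroupsEnabled(conn)) {
        System.out.println("rsgroups are not enabled on this cluster");
        return;
      }
      // Each row of hbase:rsgroup deserializes into one RSGroupInfo.
      for (RSGroupInfo group : RSGroupTableAccessor.getAllRSGroupInfo(conn)) {
        System.out.println(group.getName() + " -> servers=" + group.getServers()
          + ", tables=" + group.getTables());
      }
    }
  }
}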
*/ @InterfaceAudience.Public public class RegionException extends HBaseIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index e05a0e6f1093..f4da492ef0df 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.List; @@ -28,12 +25,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** * Encapsulates per-region load metrics. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link RegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link RegionMetrics} + * instead. */ @InterfaceAudience.Public @Deprecated @@ -43,7 +41,7 @@ public class RegionLoad implements RegionMetrics { protected ClusterStatusProtos.RegionLoad regionLoadPB; private final RegionMetrics metrics; - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) { this.regionLoadPB = regionLoadPB; this.metrics = RegionMetricsBuilder.toRegionMetrics(regionLoadPB); @@ -56,8 +54,8 @@ public RegionLoad(ClusterStatusProtos.RegionLoad regionLoadPB) { /** * @return the region name - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionName} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getRegionName} + * instead. */ @Deprecated public byte[] getName() { @@ -151,8 +149,8 @@ public Size getUncompressedStoreFileSize() { /** * @return the number of stores - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getStoreCount} + * instead. */ @Deprecated public int getStores() { @@ -161,8 +159,8 @@ public int getStores() { /** * @return the number of storefiles - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileCount} instead. */ @Deprecated public int getStorefiles() { @@ -171,8 +169,8 @@ public int getStorefiles() { /** * @return the total size of the storefiles, in MB - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileSize} instead. 
*/ @Deprecated public int getStorefileSizeMB() { @@ -181,8 +179,8 @@ public int getStorefileSizeMB() { /** * @return the memstore size, in MB - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMemStoreSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getMemStoreSize} instead. */ @Deprecated public int getMemStoreSizeMB() { @@ -191,8 +189,8 @@ public int getMemStoreSizeMB() { /** * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * ((HBASE-3935)). - * Use {@link #getStoreFileRootLevelIndexSize} instead. + * ((HBASE-3935)). Use + * {@link #getStoreFileRootLevelIndexSize} instead. */ @Deprecated public int getStorefileIndexSizeMB() { @@ -201,8 +199,8 @@ public int getStorefileIndexSizeMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileRootLevelIndexSize()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileRootLevelIndexSize()} instead. */ @Deprecated public int getStorefileIndexSizeKB() { @@ -211,8 +209,8 @@ public int getStorefileIndexSizeKB() { /** * @return the number of requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRequestCount()} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRequestCount()} instead. */ @Deprecated public long getRequestsCount() { @@ -221,8 +219,8 @@ public long getRequestsCount() { /** * @return the number of read requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getReadRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getReadRequestCount} instead. */ @Deprecated public long getReadRequestsCount() { @@ -231,8 +229,8 @@ public long getReadRequestsCount() { /** * @return the number of filtered read requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getFilteredReadRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getFilteredReadRequestCount} instead. */ @Deprecated public long getFilteredReadRequestsCount() { @@ -241,8 +239,8 @@ public long getFilteredReadRequestsCount() { /** * @return the number of write requests made to region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getWriteRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getWriteRequestCount} instead. */ @Deprecated public long getWriteRequestsCount() { @@ -251,8 +249,8 @@ public long getWriteRequestsCount() { /** * @return The current total size of root-level indexes for the region, in KB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileRootLevelIndexSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileRootLevelIndexSize} instead. */ @Deprecated public int getRootIndexSizeKB() { @@ -261,8 +259,8 @@ public int getRootIndexSizeKB() { /** * @return The total size of all index blocks, not just the root level, in KB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreFileUncompressedDataIndexSize} instead. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreFileUncompressedDataIndexSize} instead. */ @Deprecated public int getTotalStaticIndexSizeKB() { @@ -270,10 +268,9 @@ public int getTotalStaticIndexSizeKB() { } /** - * @return The total size of all Bloom filter blocks, not just loaded into the - * block cache, in KB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getBloomFilterSize} instead. + * @return The total size of all Bloom filter blocks, not just loaded into the block cache, in KB. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getBloomFilterSize} instead. */ @Deprecated public int getTotalStaticBloomSizeKB() { @@ -282,8 +279,8 @@ public int getTotalStaticBloomSizeKB() { /** * @return the total number of kvs in current compaction - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCompactingCellCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCompactingCellCount} instead. */ @Deprecated public long getTotalCompactingKVs() { @@ -292,8 +289,8 @@ public long getTotalCompactingKVs() { /** * @return the number of already compacted kvs in current compaction - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCompactedCellCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCompactedCellCount} instead. */ @Deprecated public long getCurrentCompactedKVs() { @@ -303,8 +300,8 @@ public long getCurrentCompactedKVs() { /** * This does not really belong inside RegionLoad but its being done in the name of expediency. * @return the completed sequence Id for the region - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCompletedSequenceId} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCompletedSequenceId} instead. */ @Deprecated public long getCompleteSequenceId() { @@ -313,23 +310,22 @@ public long getCompleteSequenceId() { /** * @return completed sequence id per store. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getStoreSequenceId} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getStoreSequenceId} instead. */ @Deprecated public List getStoreCompleteSequenceId() { return metrics.getStoreSequenceId().entrySet().stream() .map(s -> ClusterStatusProtos.StoreSequenceId.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(s.getKey())) - .setSequenceId(s.getValue()) - .build()) + .setFamilyName(UnsafeByteOperations.unsafeWrap(s.getKey())).setSequenceId(s.getValue()) + .build()) .collect(Collectors.toList()); } /** * @return the uncompressed size of the storefiles in MB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getUncompressedStoreFileSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getUncompressedStoreFileSize} instead. */ @Deprecated public int getStoreUncompressedSizeMB() { @@ -351,8 +347,8 @@ public long getLastMajorCompactionTimestamp() { /** * @return the timestamp of the oldest hfile for any store of this region. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getLastMajorCompactionTimestamp} instead. 
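The @deprecated notes being rewrapped in this file all point from the old RegionLoad getters (raw MB/KB ints and KV counts) to the RegionMetrics replacements. A hedged sketch of the replacement path, not part of this patch: it pulls RegionMetrics through the Admin API and reads sizes via Size with an explicit unit, which is the shape the deprecation messages recommend.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class RegionMetricsDumpSketch {

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
      Admin admin = conn.getAdmin()) {
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        List<RegionMetrics> metrics = admin.getRegionMetrics(sn);
        for (RegionMetrics rm : metrics) {
          // RegionMetrics reports sizes as Size objects instead of raw MB/KB ints,
          // so the caller chooses the unit explicitly.
          System.out.println(rm.getNameAsString()
            + " storeFileSizeMB=" + rm.getStoreFileSize().get(Size.Unit.MEGABYTE)
            + " readRequests=" + rm.getReadRequestCount()
            + " writeRequests=" + rm.getWriteRequestCount());
        }
      }
    }
  }
}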
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getLastMajorCompactionTimestamp} instead. */ @Deprecated public long getLastMajorCompactionTs() { @@ -401,47 +397,33 @@ public CompactionState getCompactionState() { */ @Override public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "numberOfStores", - this.getStores()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "numberOfStores", this.getStores()); Strings.appendKeyValue(sb, "numberOfStorefiles", this.getStorefiles()); Strings.appendKeyValue(sb, "storeRefCount", this.getStoreRefCount()); - Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - this.getStoreUncompressedSizeMB()); - Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTs()); + Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", this.getStoreUncompressedSizeMB()); + Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", this.getLastMajorCompactionTs()); Strings.appendKeyValue(sb, "storefileSizeMB", this.getStorefileSizeMB()); if (this.getStoreUncompressedSizeMB() != 0) { - Strings.appendKeyValue(sb, "compressionRatio", - String.format("%.4f", (float) this.getStorefileSizeMB() / - (float) this.getStoreUncompressedSizeMB())); + Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", + (float) this.getStorefileSizeMB() / (float) this.getStoreUncompressedSizeMB())); } - Strings.appendKeyValue(sb, "memstoreSizeMB", - this.getMemStoreSizeMB()); - Strings.appendKeyValue(sb, "readRequestsCount", - this.getReadRequestsCount()); - Strings.appendKeyValue(sb, "writeRequestsCount", - this.getWriteRequestsCount()); - Strings.appendKeyValue(sb, "rootIndexSizeKB", - this.getRootIndexSizeKB()); - Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - this.getTotalStaticIndexSizeKB()); - Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - this.getTotalStaticBloomSizeKB()); - Strings.appendKeyValue(sb, "totalCompactingKVs", - this.getTotalCompactingKVs()); - Strings.appendKeyValue(sb, "currentCompactedKVs", - this.getCurrentCompactedKVs()); + Strings.appendKeyValue(sb, "memstoreSizeMB", this.getMemStoreSizeMB()); + Strings.appendKeyValue(sb, "readRequestsCount", this.getReadRequestsCount()); + Strings.appendKeyValue(sb, "writeRequestsCount", this.getWriteRequestsCount()); + Strings.appendKeyValue(sb, "rootIndexSizeKB", this.getRootIndexSizeKB()); + Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", this.getTotalStaticIndexSizeKB()); + Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", this.getTotalStaticBloomSizeKB()); + Strings.appendKeyValue(sb, "totalCompactingKVs", this.getTotalCompactingKVs()); + Strings.appendKeyValue(sb, "currentCompactedKVs", this.getCurrentCompactedKVs()); float compactionProgressPct = Float.NaN; if (this.getTotalCompactingKVs() > 0) { - compactionProgressPct = ((float) this.getCurrentCompactedKVs() / - (float) this.getTotalCompactingKVs()); + compactionProgressPct = + ((float) this.getCurrentCompactedKVs() / (float) this.getTotalCompactingKVs()); } - Strings.appendKeyValue(sb, "compactionProgressPct", - compactionProgressPct); - Strings.appendKeyValue(sb, "completeSequenceId", - this.getCompleteSequenceId()); - Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "completeSequenceId", this.getCompleteSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", 
this.getDataLocality()); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 0d3a464e0f86..0a297166e12d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Arrays; import java.util.Collection; import java.util.Iterator; - import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; /** - * Container for holding a list of {@link HRegionLocation}'s that correspond to the - * same range. The list is indexed by the replicaId. This is an immutable list, - * however mutation operations are provided which returns a new List via copy-on-write - * (assuming small number of locations) + * Container for holding a list of {@link HRegionLocation}'s that correspond to the same range. The + * list is indexed by the replicaId. This is an immutable list, however mutation operations are + * provided which returns a new List via copy-on-write (assuming small number of locations) */ @InterfaceAudience.Private public class RegionLocations implements Iterable { @@ -45,10 +42,9 @@ public class RegionLocations implements Iterable { private final HRegionLocation[] locations; // replicaId -> HRegionLocation. /** - * Constructs the region location list. The locations array should - * contain all the locations for known replicas for the region, and should be - * sorted in replicaId ascending order, although it can contain nulls indicating replicaIds - * that the locations of which are not known. + * Constructs the region location list. The locations array should contain all the locations for + * known replicas for the region, and should be sorted in replicaId ascending order, although it + * can contain nulls indicating replicaIds that the locations of which are not known. * @param locations an array of HRegionLocations for the same region range */ public RegionLocations(HRegionLocation... locations) { @@ -66,7 +62,7 @@ public RegionLocations(HRegionLocation... locations) { index++; } // account for the null elements in the array after maxReplicaIdIndex - maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1) ); + maxReplicaId = maxReplicaId + (locations.length - (maxReplicaIdIndex + 1)); if (maxReplicaId + 1 == locations.length) { this.locations = locations; @@ -79,7 +75,7 @@ public RegionLocations(HRegionLocation... locations) { } } for (HRegionLocation loc : this.locations) { - if (loc != null && loc.getServerName() != null){ + if (loc != null && loc.getServerName() != null) { numNonNullElements++; } } @@ -91,8 +87,7 @@ public RegionLocations(Collection locations) { } /** - * Returns the size of the list even if some of the elements - * might be null. + * Returns the size of the list even if some of the elements might be null. 
* @return the size of the list (corresponding to the max replicaId) */ public int size() { @@ -116,18 +111,18 @@ public boolean isEmpty() { } /** - * Returns a new RegionLocations with the locations removed (set to null) - * which have the destination server as given. + * Returns a new RegionLocations with the locations removed (set to null) which have the + * destination server as given. * @param serverName the serverName to remove locations of - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations removeByServer(ServerName serverName) { HRegionLocation[] newLocations = null; for (int i = 0; i < locations.length; i++) { // check whether something to remove if (locations[i] != null && serverName.equals(locations[i].getServerName())) { - if (newLocations == null) { //first time + if (newLocations == null) { // first time newLocations = new HRegionLocation[locations.length]; System.arraycopy(locations, 0, newLocations, 0, i); } @@ -142,8 +137,8 @@ public RegionLocations removeByServer(ServerName serverName) { /** * Removes the given location from the list * @param location the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(HRegionLocation location) { if (location == null) return this; @@ -153,9 +148,8 @@ public RegionLocations remove(HRegionLocation location) { // check whether something to remove. HRL.compareTo() compares ONLY the // serverName. We want to compare the HRI's as well. - if (locations[replicaId] == null - || RegionInfo.COMPARATOR.compare(location.getRegion(), locations[replicaId].getRegion()) != 0 - || !location.equals(locations[replicaId])) { + if (locations[replicaId] == null || RegionInfo.COMPARATOR.compare(location.getRegion(), + locations[replicaId].getRegion()) != 0 || !location.equals(locations[replicaId])) { return this; } @@ -169,8 +163,8 @@ public RegionLocations remove(HRegionLocation location) { /** * Removes location of the given replicaId from the list * @param replicaId the replicaId of the location to remove - * @return an RegionLocations object with removed locations or the same object - * if nothing is removed + * @return an RegionLocations object with removed locations or the same object if nothing is + * removed */ public RegionLocations remove(int replicaId) { if (getRegionLocation(replicaId) == null) { @@ -204,13 +198,11 @@ public RegionLocations removeElementsWithNullLocation() { } /** - * Merges this RegionLocations list with the given list assuming - * same range, and keeping the most up to date version of the - * HRegionLocation entries from either list according to seqNum. If seqNums - * are equal, the location from the argument (other) is taken. + * Merges this RegionLocations list with the given list assuming same range, and keeping the most + * up to date version of the HRegionLocation entries from either list according to seqNum. If + * seqNums are equal, the location from the argument (other) is taken. 
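The RegionLocations Javadoc above stresses the copy-on-write contract: mutation-style calls return either a new instance or, when nothing changes, the same object. A small sketch of that contract, not part of this patch and using the internal (@InterfaceAudience.Private) class purely for illustration; host names and seqNums are invented.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;

public final class RegionLocationsSketch {

  public static void main(String[] args) {
    RegionInfo primary = RegionInfoBuilder.newBuilder(TableName.valueOf("demo")).build();
    ServerName rs1 = ServerName.valueOf("rs1.example.org", 16020, 1L);
    ServerName rs2 = ServerName.valueOf("rs2.example.org", 16020, 1L);

    // Start with one location for replica 0, carrying seqNum 10.
    RegionLocations locs = new RegionLocations(new HRegionLocation(primary, rs1, 10L));

    // A higher seqNum wins, so a new RegionLocations is returned.
    RegionLocations newer =
      locs.updateLocation(new HRegionLocation(primary, rs2, 20L), false, false);
    // A stale seqNum is ignored and the same instance comes back.
    RegionLocations unchanged =
      newer.updateLocation(new HRegionLocation(primary, rs1, 5L), false, false);

    System.out.println(newer != locs);      // true: copy-on-write produced a new list
    System.out.println(unchanged == newer); // true: nothing was updated
    System.out.println(newer.getRegionLocation().getServerName());
  }
}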
* @param other the locations to merge with - * @return an RegionLocations object with merged locations or the same object - * if nothing is merged + * @return an RegionLocations object with merged locations or the same object if nothing is merged */ public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -231,8 +223,7 @@ public RegionLocations mergeLocations(RegionLocations other) { regionInfo = otherLoc.getRegion(); } - HRegionLocation selectedLoc = selectRegionLocation(thisLoc, - otherLoc, true, false); + HRegionLocation selectedLoc = selectRegionLocation(thisLoc, otherLoc, true, false); if (selectedLoc != thisLoc) { if (newLocations == null) { @@ -247,10 +238,9 @@ public RegionLocations mergeLocations(RegionLocations other) { // ensure that all replicas share the same start code. Otherwise delete them if (newLocations != null && regionInfo != null) { - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { - if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, - newLocations[i].getRegion())) { + if (!RegionReplicaUtil.isReplicasForSameRegion(regionInfo, newLocations[i].getRegion())) { newLocations[i] = null; } } @@ -270,41 +260,39 @@ private HRegionLocation selectRegionLocation(HRegionLocation oldLocation, return location; } - if (force - || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { + if (force || isGreaterThan(location.getSeqNum(), oldLocation.getSeqNum(), checkForEquals)) { return location; } return oldLocation; } /** - * Updates the location with new only if the new location has a higher - * seqNum than the old one or force is true. + * Updates the location with new only if the new location has a higher seqNum than the old one or + * force is true. * @param location the location to add or update - * @param checkForEquals whether to update the location if seqNums for the - * HRegionLocations for the old and new location are the same + * @param checkForEquals whether to update the location if seqNums for the HRegionLocations for + * the old and new location are the same * @param force whether to force update - * @return an RegionLocations object with updated locations or the same object - * if nothing is updated + * @return an RegionLocations object with updated locations or the same object if nothing is + * updated */ - public RegionLocations updateLocation(HRegionLocation location, - boolean checkForEquals, boolean force) { + public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, + boolean force) { assert location != null; int replicaId = location.getRegion().getReplicaId(); HRegionLocation oldLoc = getRegionLocation(location.getRegion().getReplicaId()); - HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, - checkForEquals, force); + HRegionLocation selectedLoc = selectRegionLocation(oldLoc, location, checkForEquals, force); if (selectedLoc == oldLoc) { return this; } - HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId +1)]; + HRegionLocation[] newLocations = new HRegionLocation[Math.max(locations.length, replicaId + 1)]; System.arraycopy(locations, 0, newLocations, 0, locations.length); newLocations[replicaId] = location; // ensure that all replicas share the same start code. 
Otherwise delete them - for (int i=0; i < newLocations.length; i++) { + for (int i = 0; i < newLocations.length; i++) { if (newLocations[i] != null) { if (!RegionReplicaUtil.isReplicasForSameRegion(location.getRegion(), newLocations[i].getRegion())) { @@ -327,8 +315,8 @@ public HRegionLocation getRegionLocation(int replicaId) { } /** - * Returns the region location from the list for matching regionName, which can - * be regionName or encodedRegionName + * Returns the region location from the list for matching regionName, which can be regionName or + * encodedRegionName * @param regionName regionName or encodedRegionName * @return HRegionLocation found or null */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 1a8e6c8c6556..73d036e73705 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; @@ -26,8 +23,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Encapsulates per-region load metrics. - */ + * Encapsulates per-region load metrics. + */ @InterfaceAudience.Public public interface RegionMetrics { @@ -87,8 +84,8 @@ default String getNameAsString() { /** * TODO: why we pass the same value to different counters? Currently, the value from - * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() - * see HRegionServer#createRegionLoad. + * getStoreFileIndexSize() is same with getStoreFileRootLevelIndexSize() see + * HRegionServer#createRegionLoad. * @return The current total size of root-level indexes for the region */ Size getStoreFileIndexSize(); @@ -129,7 +126,6 @@ default String getNameAsString() { */ Map getStoreSequenceId(); - /** * @return the uncompressed size of the storefiles */ @@ -151,8 +147,8 @@ default String getNameAsString() { int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files - * of this region + * @return the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java index cca6686f5861..83e6d9729121 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import java.util.Collections; @@ -39,8 +36,8 @@ @InterfaceAudience.Private public final class RegionMetricsBuilder { - public static List toRegionMetrics( - AdminProtos.GetRegionLoadResponse regionLoadResponse) { + public static List + toRegionMetrics(AdminProtos.GetRegionLoadResponse regionLoadResponse) { return regionLoadResponse.getRegionLoadsList().stream() .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList()); } @@ -53,75 +50,70 @@ public static RegionMetrics toRegionMetrics(ClusterStatusProtos.RegionLoad regio .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs()) .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId()) .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f) - .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ? - regionLoadPB.getDataLocalityForSsd() : 0.0f) - .setBlocksLocalWeight(regionLoadPB.hasBlocksLocalWeight() ? - regionLoadPB.getBlocksLocalWeight() : 0) - .setBlocksLocalWithSsdWeight(regionLoadPB.hasBlocksLocalWithSsdWeight() ? - regionLoadPB.getBlocksLocalWithSsdWeight() : 0) + .setDataLocalityForSsd( + regionLoadPB.hasDataLocalityForSsd() ? regionLoadPB.getDataLocalityForSsd() : 0.0f) + .setBlocksLocalWeight( + regionLoadPB.hasBlocksLocalWeight() ? regionLoadPB.getBlocksLocalWeight() : 0) + .setBlocksLocalWithSsdWeight( + regionLoadPB.hasBlocksLocalWithSsdWeight() ? regionLoadPB.getBlocksLocalWithSsdWeight() + : 0) .setBlocksTotalWeight(regionLoadPB.getBlocksTotalWeight()) - .setCompactionState(ProtobufUtil.createCompactionStateForRegionLoad( - regionLoadPB.getCompactionState())) + .setCompactionState( + ProtobufUtil.createCompactionStateForRegionLoad(regionLoadPB.getCompactionState())) .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount()) - .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(), - Size.Unit.KILOBYTE)) + .setStoreFileUncompressedDataIndexSize( + new Size(regionLoadPB.getTotalStaticIndexSizeKB(), Size.Unit.KILOBYTE)) .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs()) .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE)) .setReadRequestCount(regionLoadPB.getReadRequestsCount()) .setWriteRequestCount(regionLoadPB.getWriteRequestsCount()) - .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreFileRootLevelIndexSize(new Size(regionLoadPB.getRootIndexSizeKB(), - Size.Unit.KILOBYTE)) - .setStoreCount(regionLoadPB.getStores()) - .setStoreFileCount(regionLoadPB.getStorefiles()) + .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreFileRootLevelIndexSize( + new Size(regionLoadPB.getRootIndexSizeKB(), Size.Unit.KILOBYTE)) + .setStoreCount(regionLoadPB.getStores()).setStoreFileCount(regionLoadPB.getStorefiles()) .setStoreRefCount(regionLoadPB.getStoreRefCount()) .setMaxCompactedStoreFileRefCount(regionLoadPB.getMaxCompactedStoreFileRefCount()) .setStoreFileSize(new Size(regionLoadPB.getStorefileSizeMB(), Size.Unit.MEGABYTE)) .setStoreSequenceIds(regionLoadPB.getStoreCompleteSequenceIdList().stream() - .collect(Collectors.toMap( - (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), + .collect(Collectors.toMap( + (ClusterStatusProtos.StoreSequenceId s) -> s.getFamilyName().toByteArray(), ClusterStatusProtos.StoreSequenceId::getSequenceId))) .setUncompressedStoreFileSize( - new 
Size(regionLoadPB.getStoreUncompressedSizeMB(),Size.Unit.MEGABYTE)) + new Size(regionLoadPB.getStoreUncompressedSizeMB(), Size.Unit.MEGABYTE)) .build(); } - private static List toStoreSequenceId( - Map ids) { + private static List + toStoreSequenceId(Map ids) { return ids.entrySet().stream() .map(e -> ClusterStatusProtos.StoreSequenceId.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())) - .setSequenceId(e.getValue()) - .build()) + .setFamilyName(UnsafeByteOperations.unsafeWrap(e.getKey())).setSequenceId(e.getValue()) + .build()) .collect(Collectors.toList()); } public static ClusterStatusProtos.RegionLoad toRegionLoad(RegionMetrics regionMetrics) { return ClusterStatusProtos.RegionLoad.newBuilder() - .setRegionSpecifier(HBaseProtos.RegionSpecifier - .newBuilder().setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) - .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())) - .build()) - .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize() - .get(Size.Unit.KILOBYTE)) + .setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder() + .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME) + .setValue(UnsafeByteOperations.unsafeWrap(regionMetrics.getRegionName())).build()) + .setTotalStaticBloomSizeKB((int) regionMetrics.getBloomFilterSize().get(Size.Unit.KILOBYTE)) .setCurrentCompactedKVs(regionMetrics.getCompactedCellCount()) .setTotalCompactingKVs(regionMetrics.getCompactingCellCount()) .setCompleteSequenceId(regionMetrics.getCompletedSequenceId()) .setDataLocality(regionMetrics.getDataLocality()) .setFilteredReadRequestsCount(regionMetrics.getFilteredReadRequestCount()) - .setTotalStaticIndexSizeKB((int) regionMetrics.getStoreFileUncompressedDataIndexSize() - .get(Size.Unit.KILOBYTE)) + .setTotalStaticIndexSizeKB( + (int) regionMetrics.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE)) .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp()) .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE)) .setReadRequestsCount(regionMetrics.getReadRequestCount()) .setWriteRequestsCount(regionMetrics.getWriteRequestCount()) - .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize() - .get(Size.Unit.KILOBYTE)) - .setRootIndexSizeKB((int) regionMetrics.getStoreFileRootLevelIndexSize() - .get(Size.Unit.KILOBYTE)) - .setStores(regionMetrics.getStoreCount()) - .setStorefiles(regionMetrics.getStoreFileCount()) + .setStorefileIndexSizeKB( + (long) regionMetrics.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)) + .setRootIndexSizeKB( + (int) regionMetrics.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE)) + .setStores(regionMetrics.getStoreCount()).setStorefiles(regionMetrics.getStoreFileCount()) .setStoreRefCount(regionMetrics.getStoreRefCount()) .setMaxCompactedStoreFileRefCount(regionMetrics.getMaxCompactedStoreFileRefCount()) .setStorefileSizeMB((int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE)) @@ -161,6 +153,7 @@ public static RegionMetricsBuilder newBuilder(byte[] name) { private long blocksLocalWithSsdWeight; private long blocksTotalWeight; private CompactionState compactionState; + private RegionMetricsBuilder(byte[] name) { this.name = name; } @@ -169,129 +162,134 @@ public RegionMetricsBuilder setStoreCount(int value) { this.storeCount = value; return this; } + public RegionMetricsBuilder setStoreFileCount(int value) { this.storeFileCount = value; return this; } + public RegionMetricsBuilder setStoreRefCount(int value) { 
this.storeRefCount = value; return this; } + public RegionMetricsBuilder setMaxCompactedStoreFileRefCount(int value) { this.maxCompactedStoreFileRefCount = value; return this; } + public RegionMetricsBuilder setCompactingCellCount(long value) { this.compactingCellCount = value; return this; } + public RegionMetricsBuilder setCompactedCellCount(long value) { this.compactedCellCount = value; return this; } + public RegionMetricsBuilder setStoreFileSize(Size value) { this.storeFileSize = value; return this; } + public RegionMetricsBuilder setMemStoreSize(Size value) { this.memStoreSize = value; return this; } + public RegionMetricsBuilder setStoreFileIndexSize(Size value) { this.indexSize = value; return this; } + public RegionMetricsBuilder setStoreFileRootLevelIndexSize(Size value) { this.rootLevelIndexSize = value; return this; } + public RegionMetricsBuilder setStoreFileUncompressedDataIndexSize(Size value) { this.uncompressedDataIndexSize = value; return this; } + public RegionMetricsBuilder setBloomFilterSize(Size value) { this.bloomFilterSize = value; return this; } + public RegionMetricsBuilder setUncompressedStoreFileSize(Size value) { this.uncompressedStoreFileSize = value; return this; } + public RegionMetricsBuilder setWriteRequestCount(long value) { this.writeRequestCount = value; return this; } + public RegionMetricsBuilder setReadRequestCount(long value) { this.readRequestCount = value; return this; } + public RegionMetricsBuilder setFilteredReadRequestCount(long value) { this.filteredReadRequestCount = value; return this; } + public RegionMetricsBuilder setCompletedSequenceId(long value) { this.completedSequenceId = value; return this; } + public RegionMetricsBuilder setStoreSequenceIds(Map value) { this.storeSequenceIds = value; return this; } + public RegionMetricsBuilder setDataLocality(float value) { this.dataLocality = value; return this; } + public RegionMetricsBuilder setLastMajorCompactionTimestamp(long value) { this.lastMajorCompactionTimestamp = value; return this; } + public RegionMetricsBuilder setDataLocalityForSsd(float value) { this.dataLocalityForSsd = value; return this; } + public RegionMetricsBuilder setBlocksLocalWeight(long value) { this.blocksLocalWeight = value; return this; } + public RegionMetricsBuilder setBlocksLocalWithSsdWeight(long value) { this.blocksLocalWithSsdWeight = value; return this; } + public RegionMetricsBuilder setBlocksTotalWeight(long value) { this.blocksTotalWeight = value; return this; } + public RegionMetricsBuilder setCompactionState(CompactionState compactionState) { this.compactionState = compactionState; return this; } public RegionMetrics build() { - return new RegionMetricsImpl(name, - storeCount, - storeFileCount, - storeRefCount, - maxCompactedStoreFileRefCount, - compactingCellCount, - compactedCellCount, - storeFileSize, - memStoreSize, - indexSize, - rootLevelIndexSize, - uncompressedDataIndexSize, - bloomFilterSize, - uncompressedStoreFileSize, - writeRequestCount, - readRequestCount, - filteredReadRequestCount, - completedSequenceId, - storeSequenceIds, - dataLocality, - lastMajorCompactionTimestamp, - dataLocalityForSsd, - blocksLocalWeight, - blocksLocalWithSsdWeight, - blocksTotalWeight, + return new RegionMetricsImpl(name, storeCount, storeFileCount, storeRefCount, + maxCompactedStoreFileRefCount, compactingCellCount, compactedCellCount, storeFileSize, + memStoreSize, indexSize, rootLevelIndexSize, uncompressedDataIndexSize, bloomFilterSize, + uncompressedStoreFileSize, writeRequestCount, readRequestCount, 
filteredReadRequestCount, + completedSequenceId, storeSequenceIds, dataLocality, lastMajorCompactionTimestamp, + dataLocalityForSsd, blocksLocalWeight, blocksLocalWithSsdWeight, blocksTotalWeight, compactionState); } @@ -322,32 +320,15 @@ private static class RegionMetricsImpl implements RegionMetrics { private final long blocksLocalWithSsdWeight; private final long blocksTotalWeight; private final CompactionState compactionState; - RegionMetricsImpl(byte[] name, - int storeCount, - int storeFileCount, - int storeRefCount, - int maxCompactedStoreFileRefCount, - final long compactingCellCount, - long compactedCellCount, - Size storeFileSize, - Size memStoreSize, - Size indexSize, - Size rootLevelIndexSize, - Size uncompressedDataIndexSize, - Size bloomFilterSize, - Size uncompressedStoreFileSize, - long writeRequestCount, - long readRequestCount, - long filteredReadRequestCount, - long completedSequenceId, - Map storeSequenceIds, - float dataLocality, - long lastMajorCompactionTimestamp, - float dataLocalityForSsd, - long blocksLocalWeight, - long blocksLocalWithSsdWeight, - long blocksTotalWeight, - CompactionState compactionState) { + + RegionMetricsImpl(byte[] name, int storeCount, int storeFileCount, int storeRefCount, + int maxCompactedStoreFileRefCount, final long compactingCellCount, long compactedCellCount, + Size storeFileSize, Size memStoreSize, Size indexSize, Size rootLevelIndexSize, + Size uncompressedDataIndexSize, Size bloomFilterSize, Size uncompressedStoreFileSize, + long writeRequestCount, long readRequestCount, long filteredReadRequestCount, + long completedSequenceId, Map storeSequenceIds, float dataLocality, + long lastMajorCompactionTimestamp, float dataLocalityForSsd, long blocksLocalWeight, + long blocksLocalWithSsdWeight, long blocksTotalWeight, CompactionState compactionState) { this.name = Preconditions.checkNotNull(name); this.storeCount = storeCount; this.storeFileCount = storeFileCount; @@ -508,63 +489,43 @@ public CompactionState getCompactionState() { @Override public String toString() { - StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount", - this.getStoreCount()); - Strings.appendKeyValue(sb, "storeFileCount", - this.getStoreFileCount()); - Strings.appendKeyValue(sb, "storeRefCount", - this.getStoreRefCount()); + StringBuilder sb = + Strings.appendKeyValue(new StringBuilder(), "storeCount", this.getStoreCount()); + Strings.appendKeyValue(sb, "storeFileCount", this.getStoreFileCount()); + Strings.appendKeyValue(sb, "storeRefCount", this.getStoreRefCount()); Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", this.getMaxCompactedStoreFileRefCount()); - Strings.appendKeyValue(sb, "uncompressedStoreFileSize", - this.getUncompressedStoreFileSize()); + Strings.appendKeyValue(sb, "uncompressedStoreFileSize", this.getUncompressedStoreFileSize()); Strings.appendKeyValue(sb, "lastMajorCompactionTimestamp", - this.getLastMajorCompactionTimestamp()); - Strings.appendKeyValue(sb, "storeFileSize", - this.getStoreFileSize()); + this.getLastMajorCompactionTimestamp()); + Strings.appendKeyValue(sb, "storeFileSize", this.getStoreFileSize()); if (this.getUncompressedStoreFileSize().get() != 0) { Strings.appendKeyValue(sb, "compressionRatio", - String.format("%.4f", - (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) / - (float) this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); + String.format("%.4f", (float) this.getStoreFileSize().get(Size.Unit.MEGABYTE) + / (float) 
this.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE))); } - Strings.appendKeyValue(sb, "memStoreSize", - this.getMemStoreSize()); - Strings.appendKeyValue(sb, "readRequestCount", - this.getReadRequestCount()); - Strings.appendKeyValue(sb, "writeRequestCount", - this.getWriteRequestCount()); - Strings.appendKeyValue(sb, "rootLevelIndexSize", - this.getStoreFileRootLevelIndexSize()); + Strings.appendKeyValue(sb, "memStoreSize", this.getMemStoreSize()); + Strings.appendKeyValue(sb, "readRequestCount", this.getReadRequestCount()); + Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); + Strings.appendKeyValue(sb, "rootLevelIndexSize", this.getStoreFileRootLevelIndexSize()); Strings.appendKeyValue(sb, "uncompressedDataIndexSize", - this.getStoreFileUncompressedDataIndexSize()); - Strings.appendKeyValue(sb, "bloomFilterSize", - this.getBloomFilterSize()); - Strings.appendKeyValue(sb, "compactingCellCount", - this.getCompactingCellCount()); - Strings.appendKeyValue(sb, "compactedCellCount", - this.getCompactedCellCount()); + this.getStoreFileUncompressedDataIndexSize()); + Strings.appendKeyValue(sb, "bloomFilterSize", this.getBloomFilterSize()); + Strings.appendKeyValue(sb, "compactingCellCount", this.getCompactingCellCount()); + Strings.appendKeyValue(sb, "compactedCellCount", this.getCompactedCellCount()); float compactionProgressPct = Float.NaN; if (this.getCompactingCellCount() > 0) { - compactionProgressPct = ((float) this.getCompactedCellCount() / - (float) this.getCompactingCellCount()); + compactionProgressPct = + ((float) this.getCompactedCellCount() / (float) this.getCompactingCellCount()); } - Strings.appendKeyValue(sb, "compactionProgressPct", - compactionProgressPct); - Strings.appendKeyValue(sb, "completedSequenceId", - this.getCompletedSequenceId()); - Strings.appendKeyValue(sb, "dataLocality", - this.getDataLocality()); - Strings.appendKeyValue(sb, "dataLocalityForSsd", - this.getDataLocalityForSsd()); - Strings.appendKeyValue(sb, "blocksLocalWeight", - blocksLocalWeight); - Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", - blocksLocalWithSsdWeight); - Strings.appendKeyValue(sb, "blocksTotalWeight", - blocksTotalWeight); - Strings.appendKeyValue(sb, "compactionState", - compactionState); + Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); + Strings.appendKeyValue(sb, "completedSequenceId", this.getCompletedSequenceId()); + Strings.appendKeyValue(sb, "dataLocality", this.getDataLocality()); + Strings.appendKeyValue(sb, "dataLocalityForSsd", this.getDataLocalityForSsd()); + Strings.appendKeyValue(sb, "blocksLocalWeight", blocksLocalWeight); + Strings.appendKeyValue(sb, "blocksLocalWithSsdWeight", blocksLocalWithSsdWeight); + Strings.appendKeyValue(sb, "blocksTotalWeight", blocksTotalWeight); + Strings.appendKeyValue(sb, "compactionState", compactionState); return sb.toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java index 3024962ebd67..4cdb4ea2ade6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionTooBusyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,15 +18,14 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown by a region server if it will block and wait to serve a request. - * For example, the client wants to insert something to a region while the - * region is compacting. Keep variance in the passed 'msg' low because its msg is used as a key - * over in {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} - * grouping failure types. + * Thrown by a region server if it will block and wait to serve a request. For example, the client + * wants to insert something to a region while the region is compacting. Keep variance in the passed + * 'msg' low because its msg is used as a key over in + * {@link org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException} grouping failure + * types. */ @InterfaceAudience.Public public class RegionTooBusyException extends IOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java index 6f02df2028f9..4d1deebb4e87 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ReplicationPeerNotFoundException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java index 9df4f893c714..46cc77c61b8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RetryImmediatelyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 7b8f713ddb7f..60e84baf6efa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Arrays; @@ -33,13 +30,13 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Objects; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; /** * This class is used for exporting current state of load on a RegionServer. - * - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link ServerMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link ServerMetrics} + * instead. */ @InterfaceAudience.Public @Deprecated @@ -68,7 +65,7 @@ public ServerLoad(ClusterStatusProtos.ServerLoad serverLoad) { this(ServerName.valueOf("localhost,1,1"), serverLoad); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "URF_UNREAD_PUBLIC_OR_PROTECTED_FIELD") @InterfaceAudience.Private public ServerLoad(ServerName name, ClusterStatusProtos.ServerLoad serverLoad) { this(ServerMetricsBuilder.toServerMetrics(name, serverLoad)); @@ -112,9 +109,9 @@ public ClusterStatusProtos.ServerLoad obtainServerLoadPB() { protected ClusterStatusProtos.ServerLoad serverLoad; /** - * @return number of requests since last report. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getRequestCountPerSecond} instead. + * @return number of requests since last report. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getRequestCountPerSecond} instead. */ @Deprecated public long getNumberOfRequests() { @@ -122,8 +119,7 @@ public long getNumberOfRequests() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasNumberOfRequests() { @@ -132,8 +128,8 @@ public boolean hasNumberOfRequests() { /** * @return total Number of requests from the start of the region server. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getRequestCount} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getRequestCount} instead. */ @Deprecated public long getTotalNumberOfRequests() { @@ -141,8 +137,7 @@ public long getTotalNumberOfRequests() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasTotalNumberOfRequests() { @@ -151,8 +146,8 @@ public boolean hasTotalNumberOfRequests() { /** * @return the amount of used heap, in MB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getUsedHeapSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getUsedHeapSize} instead. 
*/ @Deprecated public int getUsedHeapMB() { @@ -160,8 +155,7 @@ public int getUsedHeapMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasUsedHeapMB() { @@ -170,8 +164,8 @@ public boolean hasUsedHeapMB() { /** * @return the maximum allowable size of the heap, in MB. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getMaxHeapSize} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getMaxHeapSize} instead. */ @Deprecated public int getMaxHeapMB() { @@ -179,8 +173,7 @@ public int getMaxHeapMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * No flag in 2.0 + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated public boolean hasMaxHeapMB() { @@ -188,8 +181,8 @@ public boolean hasMaxHeapMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStores() { @@ -197,8 +190,8 @@ public int getStores() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefiles() { @@ -206,8 +199,8 @@ public int getStorefiles() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStoreUncompressedSizeMB() { @@ -215,8 +208,8 @@ public int getStoreUncompressedSizeMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileSizeInMB() { @@ -224,8 +217,8 @@ public int getStorefileSizeInMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileSizeMB() { @@ -233,8 +226,8 @@ public int getStorefileSizeMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getMemstoreSizeInMB() { @@ -242,8 +235,8 @@ public int getMemstoreSizeInMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getMemStoreSizeMB() { @@ -251,8 +244,8 @@ public int getMemStoreSizeMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getStorefileIndexSizeInMB() { @@ -261,8 +254,8 @@ public int getStorefileIndexSizeInMB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getStorefileIndexSizeKB() { @@ -270,8 +263,8 @@ public long getStorefileIndexSizeKB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getReadRequestsCount() { @@ -279,8 +272,8 @@ public long getReadRequestsCount() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getFilteredReadRequestsCount() { @@ -288,8 +281,8 @@ public long getFilteredReadRequestsCount() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getWriteRequestsCount() { @@ -297,8 +290,8 @@ public long getWriteRequestsCount() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getRootIndexSizeKB() { @@ -306,8 +299,8 @@ public int getRootIndexSizeKB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getTotalStaticIndexSizeKB() { @@ -315,8 +308,8 @@ public int getTotalStaticIndexSizeKB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getTotalStaticBloomSizeKB() { @@ -324,8 +317,8 @@ public int getTotalStaticBloomSizeKB() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getTotalCompactingKVs() { @@ -333,8 +326,8 @@ public long getTotalCompactingKVs() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public long getCurrentCompactedKVs() { @@ -342,8 +335,8 @@ public long getCurrentCompactedKVs() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. 
+ * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public int getNumberOfRegions() { @@ -412,7 +405,8 @@ public Map getRegionMetrics() { return metrics.getRegionMetrics(); } - @Override public Map getUserMetrics() { + @Override + public Map getUserMetrics() { return metrics.getUserMetrics(); } @@ -437,15 +431,13 @@ public List getTasks() { } /** - * Originally, this method factored in the effect of requests going to the - * server as well. However, this does not interact very well with the current - * region rebalancing code, which only factors number of regions. For the - * interim, until we can figure out how to make rebalancing use all the info - * available, we're just going to make load purely the number of regions. - * + * Originally, this method factored in the effect of requests going to the server as well. + * However, this does not interact very well with the current region rebalancing code, which only + * factors number of regions. For the interim, until we can figure out how to make rebalancing use + * all the info available, we're just going to make load purely the number of regions. * @return load factor for this server. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getNumberOfRegions} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getNumberOfRegions} instead. */ @Deprecated public int getLoad() { @@ -457,21 +449,20 @@ public int getLoad() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegionMetrics} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegionMetrics} instead. */ @Deprecated public Map getRegionsLoad() { - return getRegionMetrics().entrySet().stream() - .collect(Collectors.toMap(Map.Entry::getKey, e -> new RegionLoad(e.getValue()), - (v1, v2) -> { - throw new RuntimeException("key collisions?"); - }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); + return getRegionMetrics().entrySet().stream().collect( + Collectors.toMap(Map.Entry::getKey, e -> new RegionLoad(e.getValue()), (v1, v2) -> { + throw new RuntimeException("key collisions?"); + }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCoprocessorNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCoprocessorNames} instead. */ @Deprecated public String[] getRegionServerCoprocessors() { @@ -479,8 +470,8 @@ public String[] getRegionServerCoprocessors() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getCoprocessorNames} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getCoprocessorNames} instead. */ @Deprecated public String[] getRsCoprocessors() { @@ -488,8 +479,8 @@ public String[] getRsCoprocessors() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRequestCountPerSecond} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRequestCountPerSecond} instead. 
*/ @Deprecated public double getRequestsPerSecond() { @@ -509,24 +500,23 @@ public String toString() { Strings.appendKeyValue(sb, "numberOfStores", Integer.valueOf(this.stores)); Strings.appendKeyValue(sb, "numberOfStorefiles", Integer.valueOf(this.storefiles)); Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", - Integer.valueOf(this.storeUncompressedSizeMB)); + Integer.valueOf(this.storeUncompressedSizeMB)); Strings.appendKeyValue(sb, "storefileSizeMB", Integer.valueOf(this.storefileSizeMB)); if (this.storeUncompressedSizeMB != 0) { - Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", - (float) this.storefileSizeMB / (float) this.storeUncompressedSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", (float) this.storefileSizeMB / (float) this.storeUncompressedSizeMB)); } Strings.appendKeyValue(sb, "memstoreSizeMB", Integer.valueOf(this.memstoreSizeMB)); - Strings.appendKeyValue(sb, "storefileIndexSizeKB", - Long.valueOf(this.storefileIndexSizeKB)); + Strings.appendKeyValue(sb, "storefileIndexSizeKB", Long.valueOf(this.storefileIndexSizeKB)); Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount)); Strings.appendKeyValue(sb, "filteredReadRequestsCount", - Long.valueOf(this.filteredReadRequestsCount)); + Long.valueOf(this.filteredReadRequestsCount)); Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount)); Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB)); Strings.appendKeyValue(sb, "totalStaticIndexSizeKB", - Integer.valueOf(this.totalStaticIndexSizeKB)); + Integer.valueOf(this.totalStaticIndexSizeKB)); Strings.appendKeyValue(sb, "totalStaticBloomSizeKB", - Integer.valueOf(this.totalStaticBloomSizeKB)); + Integer.valueOf(this.totalStaticBloomSizeKB)); Strings.appendKeyValue(sb, "totalCompactingKVs", Long.valueOf(this.totalCompactingKVs)); Strings.appendKeyValue(sb, "currentCompactedKVs", Long.valueOf(this.currentCompactedKVs)); float compactionProgressPct = Float.NaN; @@ -544,17 +534,16 @@ public String toString() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link ServerMetricsBuilder#of(ServerName)} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link ServerMetricsBuilder#of(ServerName)} instead. */ @Deprecated - public static final ServerLoad EMPTY_SERVERLOAD = - new ServerLoad(ServerName.valueOf("localhost,1,1"), - ClusterStatusProtos.ServerLoad.newBuilder().build()); + public static final ServerLoad EMPTY_SERVERLOAD = new ServerLoad( + ServerName.valueOf("localhost,1,1"), ClusterStatusProtos.ServerLoad.newBuilder().build()); /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getReportTimestamp} instead. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getReportTimestamp} instead. 
*/ @Deprecated public long getReportTime() { @@ -563,11 +552,10 @@ public long getReportTime() { @Override public int hashCode() { - return Objects - .hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, memstoreSizeMB, - storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, writeRequestsCount, - rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs, - currentCompactedKVs); + return Objects.hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, + memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, + writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, + totalCompactingKVs, currentCompactedKVs); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java index 893534cba5f0..0ded27524a02 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetrics.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index 89cee9ffaf6f..1850ab42662a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,18 +6,18 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import edu.umd.cs.findbugs.annotations.Nullable; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -37,6 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @@ -69,32 +69,31 @@ public static ServerMetrics toServerMetrics(ServerName serverName, public static ServerMetrics toServerMetrics(ServerName serverName, int versionNumber, String version, ClusterStatusProtos.ServerLoad serverLoadPB) { return ServerMetricsBuilder.newBuilder(serverName) - .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) - .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) - .setInfoServerPort(serverLoadPB.getInfoServerPort()) - .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE)) - .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE)) - .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream() - .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) - .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() - .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList())) + .setRequestCountPerSecond(serverLoadPB.getNumberOfRequests()) + .setRequestCount(serverLoadPB.getTotalNumberOfRequests()) + .setInfoServerPort(serverLoadPB.getInfoServerPort()) + .setMaxHeapSize(new Size(serverLoadPB.getMaxHeapMB(), Size.Unit.MEGABYTE)) + .setUsedHeapSize(new Size(serverLoadPB.getUsedHeapMB(), Size.Unit.MEGABYTE)) + .setCoprocessorNames(serverLoadPB.getCoprocessorsList().stream() + .map(HBaseProtos.Coprocessor::getName).collect(Collectors.toList())) + .setRegionMetrics(serverLoadPB.getRegionLoadsList().stream() + .map(RegionMetricsBuilder::toRegionMetrics).collect(Collectors.toList())) .setUserMetrics(serverLoadPB.getUserLoadsList().stream() .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) - .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) - .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() - ? ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) - : null) - .setTasks(serverLoadPB.getTasksList().stream() - .map(ProtobufUtil::getServerTask).collect(Collectors.toList())) - .setReportTimestamp(serverLoadPB.getReportEndTime()) - .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) - .setVersion(version).build(); + .setReplicationLoadSources(serverLoadPB.getReplLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .setReplicationLoadSink(serverLoadPB.hasReplLoadSink() + ? 
ProtobufUtil.toReplicationLoadSink(serverLoadPB.getReplLoadSink()) + : null) + .setTasks(serverLoadPB.getTasksList().stream().map(ProtobufUtil::getServerTask) + .collect(Collectors.toList())) + .setReportTimestamp(serverLoadPB.getReportEndTime()) + .setLastReportTimestamp(serverLoadPB.getReportStartTime()).setVersionNumber(versionNumber) + .setVersion(version).build(); } public static List toCoprocessor(Collection names) { - return names.stream() - .map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) + return names.stream().map(n -> HBaseProtos.Coprocessor.newBuilder().setName(n).build()) .collect(Collectors.toList()); } @@ -106,18 +105,14 @@ public static ClusterStatusProtos.ServerLoad toServerLoad(ServerMetrics metrics) .setMaxHeapMB((int) metrics.getMaxHeapSize().get(Size.Unit.MEGABYTE)) .setUsedHeapMB((int) metrics.getUsedHeapSize().get(Size.Unit.MEGABYTE)) .addAllCoprocessors(toCoprocessor(metrics.getCoprocessorNames())) - .addAllRegionLoads( - metrics.getRegionMetrics().values().stream().map(RegionMetricsBuilder::toRegionLoad) - .collect(Collectors.toList())) - .addAllUserLoads( - metrics.getUserMetrics().values().stream().map(UserMetricsBuilder::toUserMetrics) - .collect(Collectors.toList())) - .addAllReplLoadSource( - metrics.getReplicationLoadSourceList().stream() - .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) + .addAllRegionLoads(metrics.getRegionMetrics().values().stream() + .map(RegionMetricsBuilder::toRegionLoad).collect(Collectors.toList())) + .addAllUserLoads(metrics.getUserMetrics().values().stream() + .map(UserMetricsBuilder::toUserMetrics).collect(Collectors.toList())) + .addAllReplLoadSource(metrics.getReplicationLoadSourceList().stream() + .map(ProtobufUtil::toReplicationLoadSource).collect(Collectors.toList())) .addAllTasks( - metrics.getTasks().stream().map(ProtobufUtil::toServerTask) - .collect(Collectors.toList())) + metrics.getTasks().stream().map(ProtobufUtil::toServerTask).collect(Collectors.toList())) .setReportStartTime(metrics.getLastReportTimestamp()) .setReportEndTime(metrics.getReportTimestamp()); if (metrics.getReplicationLoadSink() != null) { @@ -228,23 +223,9 @@ public ServerMetricsBuilder setTasks(List tasks) { } public ServerMetrics build() { - return new ServerMetricsImpl( - serverName, - versionNumber, - version, - requestCountPerSecond, - requestCount, - usedHeapSize, - maxHeapSize, - infoServerPort, - sources, - sink, - regionStatus, - coprocessorNames, - reportTimestamp, - lastReportTimestamp, - userMetrics, - tasks); + return new ServerMetricsImpl(serverName, versionNumber, version, requestCountPerSecond, + requestCount, usedHeapSize, maxHeapSize, infoServerPort, sources, sink, regionStatus, + coprocessorNames, reportTimestamp, lastReportTimestamp, userMetrics, tasks); } private static class ServerMetricsImpl implements ServerMetrics { @@ -269,9 +250,8 @@ private static class ServerMetricsImpl implements ServerMetrics { ServerMetricsImpl(ServerName serverName, int versionNumber, String version, long requestCountPerSecond, long requestCount, Size usedHeapSize, Size maxHeapSize, int infoServerPort, List sources, ReplicationLoadSink sink, - Map regionStatus, Set coprocessorNames, - long reportTimestamp, long lastReportTimestamp, Map userMetrics, - List tasks) { + Map regionStatus, Set coprocessorNames, long reportTimestamp, + long lastReportTimestamp, Map userMetrics, List tasks) { this.serverName = Preconditions.checkNotNull(serverName); this.versionNumber = versionNumber; this.version = version; @@ -284,7 
+264,7 @@ private static class ServerMetricsImpl implements ServerMetrics { this.sink = sink; this.regionStatus = Preconditions.checkNotNull(regionStatus); this.userMetrics = Preconditions.checkNotNull(userMetrics); - this.coprocessorNames =Preconditions.checkNotNull(coprocessorNames); + this.coprocessorNames = Preconditions.checkNotNull(coprocessorNames); this.reportTimestamp = reportTimestamp; this.lastReportTimestamp = lastReportTimestamp; this.tasks = tasks; @@ -335,11 +315,11 @@ public List getReplicationLoadSourceList() { } @Override - public Map> getReplicationLoadSourceMap(){ - Map> sourcesMap = new HashMap<>(); - for(ReplicationLoadSource loadSource : sources){ - sourcesMap.computeIfAbsent(loadSource.getPeerID(), - peerId -> new ArrayList<>()).add(loadSource); + public Map> getReplicationLoadSourceMap() { + Map> sourcesMap = new HashMap<>(); + for (ReplicationLoadSource loadSource : sources) { + sourcesMap.computeIfAbsent(loadSource.getPeerID(), peerId -> new ArrayList<>()) + .add(loadSource); } return sourcesMap; } @@ -401,8 +381,8 @@ public String toString() { storeFileCount += r.getStoreFileCount(); storeRefCount += r.getStoreRefCount(); int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); - maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, - currentMaxCompactedStoreFileRefCount); + maxCompactedStoreFileRefCount = + Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); @@ -416,21 +396,20 @@ public String toString() { compactingCellCount += r.getCompactingCellCount(); } StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "requestsPerSecond", - Double.valueOf(getRequestCountPerSecond())); + Double.valueOf(getRequestCountPerSecond())); Strings.appendKeyValue(sb, "numberOfOnlineRegions", - Integer.valueOf(getRegionMetrics().size())); + Integer.valueOf(getRegionMetrics().size())); Strings.appendKeyValue(sb, "usedHeapMB", getUsedHeapSize()); Strings.appendKeyValue(sb, "maxHeapMB", getMaxHeapSize()); Strings.appendKeyValue(sb, "numberOfStores", storeCount); Strings.appendKeyValue(sb, "numberOfStorefiles", storeFileCount); Strings.appendKeyValue(sb, "storeRefCount", storeRefCount); - Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", - maxCompactedStoreFileRefCount); + Strings.appendKeyValue(sb, "maxCompactedStoreFileRefCount", maxCompactedStoreFileRefCount); Strings.appendKeyValue(sb, "storefileUncompressedSizeMB", uncompressedStoreFileSizeMB); Strings.appendKeyValue(sb, "storefileSizeMB", storeFileSizeMB); if (uncompressedStoreFileSizeMB != 0) { - Strings.appendKeyValue(sb, "compressionRatio", String.format("%.4f", - (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); + Strings.appendKeyValue(sb, "compressionRatio", + String.format("%.4f", (float) storeFileSizeMB / (float) uncompressedStoreFileSizeMB)); } Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB); Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount); @@ -443,8 +422,7 @@ public String toString() { Strings.appendKeyValue(sb, "currentCompactedKVs", compactedCellCount); float compactionProgressPct = Float.NaN; if (compactingCellCount > 0) { - compactionProgressPct = - Float.valueOf((float) compactedCellCount / compactingCellCount); + compactionProgressPct = Float.valueOf((float) 
compactedCellCount / compactingCellCount); } Strings.appendKeyValue(sb, "compactionProgressPct", compactionProgressPct); Strings.appendKeyValue(sb, "coprocessors", getCoprocessorNames()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java index e791093e43d7..d11ea86ab11b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTask.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -25,10 +25,7 @@ public interface ServerTask { /** Task state */ enum State { - RUNNING, - WAITING, - COMPLETE, - ABORTED; + RUNNING, WAITING, COMPLETE, ABORTED; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java index d4937373789e..20b7065948ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerTaskBuilder.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -33,7 +33,8 @@ public static ServerTaskBuilder newBuilder() { private long startTime; private long completionTime; - private ServerTaskBuilder() { } + private ServerTaskBuilder() { + } private static final class ServerTaskImpl implements ServerTask { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java index 0e7716a0a619..039954a14410 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/Size.java @@ -1,5 +1,4 @@ -/** - * Copyright The Apache Software Foundation +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,14 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.math.BigDecimal; @@ -24,8 +24,8 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * It is used to represent the size with different units. - * This class doesn't serve for the precise computation. 
+ * It is used to represent the size with different units. This class doesn't serve for the precise + * computation. */ @InterfaceAudience.Public public final class Size implements Comparable { @@ -34,12 +34,9 @@ public final class Size implements Comparable { public enum Unit { // keep the room to add more units for HBase 10.x - PETABYTE(100, "PB"), - TERABYTE(99, "TB"), - GIGABYTE(98, "GB"), - MEGABYTE(97, "MB"), - KILOBYTE(96, "KB"), - BYTE(95, "B"); + PETABYTE(100, "PB"), TERABYTE(99, "TB"), GIGABYTE(98, "GB"), MEGABYTE(97, "MB"), + KILOBYTE(96, "KB"), BYTE(95, "B"); + private final int orderOfSize; private final String simpleName; @@ -91,7 +88,6 @@ public double get() { /** * get the value which is converted to specified unit. - * * @param unit size unit * @return the converted value */ @@ -146,7 +142,7 @@ public boolean equals(Object obj) { return true; } if (obj instanceof Size) { - return compareTo((Size)obj) == 0; + return compareTo((Size) obj) == 0; } return false; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java index 9d67a37695ca..ae6721813a8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java index a113f7c67bf0..0e6dd90d5407 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableInfoMissingException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java index 7e5046538abc..54f44405c584 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java index 90c015674ca6..14720811ca16 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotEnabledException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,6 +26,7 @@ @InterfaceAudience.Public public class TableNotEnabledException extends DoNotRetryIOException { private static final long serialVersionUID = 262144L; + /** default constructor */ public TableNotEnabledException() { super(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java index ae114fed0e62..416d8601fc3b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotFoundException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java index 850cd9600623..9f1eb30ba20f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ public class UnknownRegionException extends DoNotRetryRegionException { /** * Constructs a new UnknownRegionException with the specified detail message. - * * @param message the detail message */ public UnknownRegionException(String message) { @@ -39,7 +37,6 @@ public UnknownRegionException(String message) { /** * Constructs a new UnknownRegionException with the specified detail message and cause. - * * @param message the detail message * @param cause the cause of the exception */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java index 14afb977b5de..36cdab1ac655 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UnknownScannerException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Thrown if a region server is passed an unknown scanner ID. - * This usually means that the client has taken too long between checkins and so the - * scanner lease on the server-side has expired OR the server-side is closing - * down and has cancelled all leases. 
+ * Thrown if a region server is passed an unknown scanner ID. This usually means that the client has + * taken too long between checkins and so the scanner lease on the server-side has expired OR the + * server-side is closing down and has cancelled all leases. */ @InterfaceAudience.Public public class UnknownScannerException extends DoNotRetryIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 6c2ba07cc3d6..2710aa9be273 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Map; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulates per-user load metrics. - */ + * Encapsulates per-user load metrics. + */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface UserMetrics { @@ -60,8 +56,8 @@ interface ClientMetrics { long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor - * service requests made by the user + * @return the number of write requests and read requests and coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index 70d28883c269..8b906c6c70f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.util.Strings; import org.apache.yetus.audience.InterfaceAudience; @@ -35,18 +31,19 @@ public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); - userLoad.getClientMetricsList().stream().map( - clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), + userLoad.getClientMetricsList().stream() + .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), - clientMetrics.getFilteredRequestsCount())).forEach(builder::addClientMetris); + clientMetrics.getFilteredRequestsCount())) + .forEach(builder::addClientMetris); return builder.build(); } public static ClusterStatusProtos.UserLoad toUserMetrics(UserMetrics userMetrics) { ClusterStatusProtos.UserLoad.Builder builder = ClusterStatusProtos.UserLoad.newBuilder().setUserName(userMetrics.getNameAsString()); - userMetrics.getClientMetrics().values().stream().map( - clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() + userMetrics.getClientMetrics().values().stream() + .map(clientMetrics -> ClusterStatusProtos.ClientMetrics.newBuilder() .setHostName(clientMetrics.getHostName()) .setWriteRequestsCount(clientMetrics.getWriteRequestsCount()) .setReadRequestsCount(clientMetrics.getReadRequestsCount()) @@ -59,9 +56,9 @@ public static UserMetricsBuilder newBuilder(byte[] name) { return new UserMetricsBuilder(name); } - private final byte[] name; private Map clientMetricsMap = new HashMap<>(); + private UserMetricsBuilder(byte[] name) { this.name = name; } @@ -89,19 +86,23 @@ public ClientMetricsImpl(String hostName, long readRequest, long writeRequest, this.filteredReadRequestsCount = filteredReadRequestsCount; } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestCount; } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestCount; } - @Override public long getFilteredReadRequestsCount() { + @Override + public long getFilteredReadRequestsCount() { return filteredReadRequestsCount; } } @@ -115,33 +116,38 @@ private static class UserMetricsImpl implements UserMetrics { this.clientMetricsMap = clientMetricsMap; } - @Override public byte[] getUserName() { + @Override + public byte[] getUserName() { return name; } - @Override public long getReadRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getReadRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getReadRequestsCount()).reduce(0L, + Long::sum); } - @Override public long getWriteRequestCount() { - return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()) - .reduce(0L, Long::sum); + @Override + public long getWriteRequestCount() { + return clientMetricsMap.values().stream().map(c -> c.getWriteRequestsCount()).reduce(0L, + Long::sum); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return this.clientMetricsMap; } - @Override public long getFilteredReadRequests() { + @Override + public long 
getFilteredReadRequests() { return clientMetricsMap.values().stream().map(c -> c.getFilteredReadRequestsCount()) .reduce(0L, Long::sum); } @Override public String toString() { - StringBuilder sb = Strings - .appendKeyValue(new StringBuilder(), "readRequestCount", this.getReadRequestCount()); + StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "readRequestCount", + this.getReadRequestCount()); Strings.appendKeyValue(sb, "writeRequestCount", this.getWriteRequestCount()); Strings.appendKeyValue(sb, "filteredReadRequestCount", this.getFilteredReadRequests()); return sb.toString(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java index 4dc44b4c3c69..fa02eadab48e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +18,6 @@ package org.apache.hadoop.hbase; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -42,7 +40,6 @@ public ZooKeeperConnectionException(String message) { /** * Constructor taking another exception. - * * @param message the message for this exception * @param exception the exception to grab data from */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java index 92b046436258..48cec12f43c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.metrics.ScanMetrics; +import org.apache.yetus.audience.InterfaceAudience; /** * Helper class for custom client scanners. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index 9e33a12af6b5..9fb2b1cfe118 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,10 +27,10 @@ abstract class AbstractResponse { public enum ResponseType { - SINGLE (0), - MULTI (1); + SINGLE(0), MULTI(1); - ResponseType(int value) {} + ResponseType(int value) { + } } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index 54138d30516c..f753bdc8845e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -90,11 +90,11 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry private final RegistryEndpointsRefresher registryEndpointRefresher; protected AbstractRpcBasedConnectionRegistry(Configuration conf, - String hedgedReqsFanoutConfigName, String initialRefreshDelaySecsConfigName, - String refreshIntervalSecsConfigName, String minRefreshIntervalSecsConfigName) - throws IOException { + String hedgedReqsFanoutConfigName, String initialRefreshDelaySecsConfigName, + String refreshIntervalSecsConfigName, String minRefreshIntervalSecsConfigName) + throws IOException { this.hedgedReadFanOut = - Math.max(1, conf.getInt(hedgedReqsFanoutConfigName, HEDGED_REQS_FANOUT_DEFAULT)); + Math.max(1, conf.getInt(hedgedReqsFanoutConfigName, HEDGED_REQS_FANOUT_DEFAULT)); rpcTimeoutMs = (int) Math.min(Integer.MAX_VALUE, conf.getLong(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); // XXX: we pass cluster id as null here since we do not have a cluster id yet, we have to fetch @@ -105,8 +105,8 @@ protected AbstractRpcBasedConnectionRegistry(Configuration conf, populateStubs(getBootstrapNodes(conf)); // could return null here is refresh interval is less than zero registryEndpointRefresher = - RegistryEndpointsRefresher.create(conf, initialRefreshDelaySecsConfigName, - refreshIntervalSecsConfigName, minRefreshIntervalSecsConfigName, this::refreshStubs); + RegistryEndpointsRefresher.create(conf, initialRefreshDelaySecsConfigName, + refreshIntervalSecsConfigName, minRefreshIntervalSecsConfigName, this::refreshStubs); } protected abstract Set getBootstrapNodes(Configuration conf) throws IOException; @@ -120,7 +120,7 @@ private void refreshStubs() throws IOException { private void populateStubs(Set addrs) throws IOException { Preconditions.checkNotNull(addrs); ImmutableMap.Builder builder = - ImmutableMap.builderWithExpectedSize(addrs.size()); + ImmutableMap.builderWithExpectedSize(addrs.size()); User user = User.getCurrent(); for (ServerName masterAddr : addrs) { builder.put(masterAddr, @@ -144,7 +144,7 @@ protected interface Callable { } private CompletableFuture call(ClientMetaService.Interface stub, - Callable callable) { + Callable callable) { HBaseRpcController controller = rpcControllerFactory.newController(); CompletableFuture future = new CompletableFuture<>(); callable.call(controller, stub, resp -> { @@ -174,8 +174,8 @@ private IOException badResponse(String debug) { * points have been tried and all of them are failed, we will fail the future. 
*/ private void groupCall(CompletableFuture future, Set servers, - List stubs, int startIndexInclusive, Callable callable, - Predicate isValidResp, String debug, ConcurrentLinkedQueue errors) { + List stubs, int startIndexInclusive, Callable callable, + Predicate isValidResp, String debug, ConcurrentLinkedQueue errors) { int endIndexExclusive = Math.min(startIndexInclusive + hedgedReadFanOut, stubs.size()); AtomicInteger remaining = new AtomicInteger(endIndexExclusive - startIndexInclusive); for (int i = startIndexInclusive; i < endIndexExclusive; i++) { @@ -194,7 +194,7 @@ private void groupCall(CompletableFuture future, Set(errors)); + new RetriesExhaustedException("masters", stubs.size(), new ArrayList<>(errors)); future.completeExceptionally(new MasterRegistryFetchException(servers, ex)); } else { groupCall(future, servers, stubs, endIndexExclusive, callable, isValidResp, debug, @@ -210,7 +210,7 @@ private void groupCall(CompletableFuture future, Set CompletableFuture call(Callable callable, - Predicate isValidResp, String debug) { + Predicate isValidResp, String debug) { ImmutableMap addr2StubRef = addr2Stub; Set servers = addr2StubRef.keySet(); List stubs = new ArrayList<>(addr2StubRef.values()); @@ -222,7 +222,7 @@ protected final CompletableFuture call(Callable callab } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/src/test/.*") + allowedOnPath = ".*/src/test/.*") Set getParsedServers() { return addr2Stub.keySet(); } @@ -233,7 +233,7 @@ Set getParsedServers() { private static RegionLocations transformMetaRegionLocations(GetMetaRegionLocationsResponse resp) { List regionLocations = new ArrayList<>(); resp.getMetaLocationsList() - .forEach(location -> regionLocations.add(ProtobufUtil.toRegionLocation(location))); + .forEach(location -> regionLocations.add(ProtobufUtil.toRegionLocation(location))); return new RegionLocations(regionLocations); } @@ -241,12 +241,11 @@ private static RegionLocations transformMetaRegionLocations(GetMetaRegionLocatio public CompletableFuture getMetaRegionLocations() { return tracedFuture( () -> this - . call( - (c, s, d) -> s.getMetaRegionLocations(c, - GetMetaRegionLocationsRequest.getDefaultInstance(), d), - r -> r.getMetaLocationsCount() != 0, - "getMetaLocationsCount") - .thenApply(AbstractRpcBasedConnectionRegistry::transformMetaRegionLocations), + . call( + (c, s, d) -> s.getMetaRegionLocations(c, + GetMetaRegionLocationsRequest.getDefaultInstance(), d), + r -> r.getMetaLocationsCount() != 0, "getMetaLocationsCount") + .thenApply(AbstractRpcBasedConnectionRegistry::transformMetaRegionLocations), getClass().getSimpleName() + ".getMetaRegionLocations"); } @@ -254,10 +253,10 @@ public CompletableFuture getMetaRegionLocations() { public CompletableFuture getClusterId() { return tracedFuture( () -> this - . call( - (c, s, d) -> s.getClusterId(c, GetClusterIdRequest.getDefaultInstance(), d), - GetClusterIdResponse::hasClusterId, "getClusterId()") - .thenApply(GetClusterIdResponse::getClusterId), + . 
call( + (c, s, d) -> s.getClusterId(c, GetClusterIdRequest.getDefaultInstance(), d), + GetClusterIdResponse::hasClusterId, "getClusterId()") + .thenApply(GetClusterIdResponse::getClusterId), getClass().getSimpleName() + ".getClusterId"); } @@ -265,10 +264,10 @@ public CompletableFuture getClusterId() { public CompletableFuture getActiveMaster() { return tracedFuture( () -> this - .call( - (c, s, d) -> s.getActiveMaster(c, GetActiveMasterRequest.getDefaultInstance(), d), - GetActiveMasterResponse::hasServerName, "getActiveMaster()") - .thenApply(resp -> ProtobufUtil.toServerName(resp.getServerName())), + . call( + (c, s, d) -> s.getActiveMaster(c, GetActiveMasterRequest.getDefaultInstance(), d), + GetActiveMasterResponse::hasServerName, "getActiveMaster()") + .thenApply(resp -> ProtobufUtil.toServerName(resp.getServerName())), getClass().getSimpleName() + ".getActiveMaster"); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index 4496a9e98558..7c02dab277f1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,9 +21,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by - * {@link Table#batch} to associate the action with it's region and maintain - * the index from the original request. + * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by + * {@link Table#batch} to associate the action with it's region and maintain the index from the + * original request. */ @InterfaceAudience.Private public class Action implements Comparable { @@ -76,7 +75,9 @@ public int getReplicaId() { return replicaId; } - public int getPriority() { return priority; } + public int getPriority() { + return priority; + } @Override public int compareTo(Action other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 0be8173dcdd5..c9448c4357a5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,11 +72,11 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; /** - * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and - * call {@link #close()} when done. - *
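Action itself is InterfaceAudience.Private: it is the carrier that lets Table#batch keep every operation tied to its region and to its index in the original request. From application code the visible surface is the batch call; a minimal sketch, assuming a reachable cluster with a table "t1" and family "cf" (names are illustrative):

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Row;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BatchExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Table table = conn.getTable(TableName.valueOf("t1"))) {
          // Mixed operations; each one is wrapped in an Action internally so the
          // results come back in the same order as this list.
          List<Row> ops = Arrays.asList(
            new Put(Bytes.toBytes("row1")).addColumn(Bytes.toBytes("cf"),
              Bytes.toBytes("q"), Bytes.toBytes("v1")),
            new Get(Bytes.toBytes("row2")));
          Object[] results = new Object[ops.size()];
          table.batch(ops, results); // results[i] corresponds to ops.get(i)
        }
      }
    }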
<p>
    Admin can be used to create, drop, list, enable and disable and otherwise modify tables, - * as well as perform other administrative operations. - * + * The administrative API for HBase. Obtain an instance from {@link Connection#getAdmin()} and call + * {@link #close()} when done. + *
<p>
    + * Admin can be used to create, drop, list, enable and disable and otherwise modify tables, as well + * as perform other administrative operations. * @see ConnectionFactory * @see Connection * @see Table @@ -126,11 +126,10 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables. - * * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors()}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors()}. * @see #listTableDescriptors() */ @Deprecated @@ -138,7 +137,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables. - * * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ @@ -146,13 +144,12 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables that match the given pattern. - * * @param pattern The compiled regular expression to match against * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables() - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors(java.util.regex.Pattern)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors(java.util.regex.Pattern)}. * @see #listTableDescriptors(Pattern) */ @Deprecated @@ -160,7 +157,6 @@ public interface Admin extends Abortable, Closeable { /** * List all the userspace tables that match the given pattern. - * * @param pattern The compiled regular expression to match against * @return a list of TableDescriptors * @throws IOException if a remote or network exception occurs @@ -172,7 +168,6 @@ default List listTableDescriptors(Pattern pattern) throws IOExc /** * List all the userspace tables matching the given regular expression. - * * @param regex The regular expression to match against * @return a list of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs @@ -185,23 +180,20 @@ default List listTableDescriptors(Pattern pattern) throws IOExc /** * List all the tables matching the given pattern. - * * @param pattern The compiled regular expression to match against * @param includeSysTables false to match only against userspace tables * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables() - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors(java.util.regex.Pattern, boolean)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors(java.util.regex.Pattern, boolean)}. * @see #listTableDescriptors(java.util.regex.Pattern, boolean) */ @Deprecated - HTableDescriptor[] listTables(Pattern pattern, boolean includeSysTables) - throws IOException; + HTableDescriptor[] listTables(Pattern pattern, boolean includeSysTables) throws IOException; /** * List all the tables matching the given pattern. 
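As the reworked Admin javadoc says, instances come from Connection#getAdmin() and should be closed when done, and the pattern-taking listTables overloads are deprecated in favour of listTableDescriptors. A minimal sketch of the non-deprecated path (the name pattern is illustrative):

    import java.util.List;
    import java.util.regex.Pattern;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class ListTablesExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin()) {
          // Userspace tables whose name starts with "web_"; system tables are not matched.
          List<TableDescriptor> tables = admin.listTableDescriptors(Pattern.compile("web_.*"));
          for (TableDescriptor td : tables) {
            System.out.println(td.getTableName());
          }
        }
      }
    }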
- * * @param pattern The compiled regular expression to match against * @param includeSysTables false to match only against userspace tables * @return a list of TableDescriptors @@ -213,22 +205,19 @@ List listTableDescriptors(Pattern pattern, boolean includeSysTa /** * List all the tables matching the given pattern. - * * @param regex The regular expression to match against * @param includeSysTables false to match only against userspace tables * @return an array of read-only HTableDescriptors * @throws IOException if a remote or network exception occurs * @see #listTables(java.util.regex.Pattern, boolean) - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #listTableDescriptors(Pattern, boolean)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #listTableDescriptors(Pattern, boolean)}. */ @Deprecated - HTableDescriptor[] listTables(String regex, boolean includeSysTables) - throws IOException; + HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException; /** * List all of the names of userspace tables. - * * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ @@ -262,8 +251,7 @@ default TableName[] listTableNames(Pattern pattern) throws IOException { * @return TableName[] table names * @throws IOException if a remote or network exception occurs */ - TableName[] listTableNames(Pattern pattern, boolean includeSysTables) - throws IOException; + TableName[] listTableNames(Pattern pattern, boolean includeSysTables) throws IOException; /** * List all of the names of userspace tables. @@ -275,18 +263,16 @@ TableName[] listTableNames(Pattern pattern, boolean includeSysTables) * {@link #listTableNames(Pattern, boolean)} instead. */ @Deprecated - TableName[] listTableNames(String regex, boolean includeSysTables) - throws IOException; + TableName[] listTableNames(String regex, boolean includeSysTables) throws IOException; /** * Get a table descriptor. - * * @param tableName as a {@link TableName} * @return the read-only tableDescriptor * @throws org.apache.hadoop.hbase.TableNotFoundException * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #getDescriptor(TableName)}. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #getDescriptor(TableName)}. */ @Deprecated HTableDescriptor getTableDescriptor(TableName tableName) @@ -294,23 +280,21 @@ HTableDescriptor getTableDescriptor(TableName tableName) /** * Get a table descriptor. - * * @param tableName as a {@link TableName} * @return the tableDescriptor * @throws org.apache.hadoop.hbase.TableNotFoundException * @throws IOException if a remote or network exception occurs */ - TableDescriptor getDescriptor(TableName tableName) - throws TableNotFoundException, IOException; + TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; /** * Creates a new table. Synchronous operation. - * * @param desc table descriptor for table * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). 
+ * threads, the table may have been created between test-for-existence and + * attempt-at-creation). * @throws IOException if a remote or network exception occurs */ default void createTable(TableDescriptor desc) throws IOException { @@ -318,12 +302,11 @@ default void createTable(TableDescriptor desc) throws IOException { } /** - * Creates a new table with the specified number of regions. The start key specified will become + * Creates a new table with the specified number of regions. The start key specified will become * the end key of the first region of the table, and the end key specified will become the start * key of the last region of the table (the first region has a null start key and the last region * has a null end key). BigInteger math will be used to divide the key range specified into enough * segments to make the required number of total regions. Synchronous operation. - * * @param desc table descriptor for table * @param startKey beginning of key range * @param endKey end of key range @@ -332,7 +315,8 @@ default void createTable(TableDescriptor desc) throws IOException { * @throws IOException if a remote or network exception occurs * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * threads, the table may have been created between test-for-existence and + * attempt-at-creation). */ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRegions) throws IOException; @@ -341,14 +325,14 @@ void createTable(TableDescriptor desc, byte[] startKey, byte[] endKey, int numRe * Creates a new table with an initial set of empty regions defined by the specified split keys. * The total number of regions created will be the number of split keys plus one. Synchronous * operation. Note : Avoid passing empty split key. - * * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table * @throws IllegalArgumentException if the table name is reserved, if the split keys are repeated - * and if the split key has empty byte array. + * and if the split key has empty byte array. * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If concurrent - * threads, the table may have been created between test-for-existence and attempt-at-creation). + * threads, the table may have been created between test-for-existence and + * attempt-at-creation). * @throws IOException if a remote or network exception occurs */ default void createTable(TableDescriptor desc, byte[][] splitKeys) throws IOException { @@ -396,61 +380,52 @@ default void deleteTable(TableName tableName) throws IOException { } /** - * Deletes the table but does not block and wait for it to be completely removed. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Deletes the table but does not block and wait for it to be completely removed. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. 
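The createTable overloads in this hunk either divide a start/end key range into a requested number of regions or take explicit split keys (which must be non-empty and not repeated). A short sketch of both forms; table, family and key values are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
            Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("pre_split"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build();
          // Ten regions whose boundaries are interpolated between the two keys ...
          admin.createTable(desc, Bytes.toBytes("00000000"), Bytes.toBytes("ffffffff"), 10);
          // ... or explicit split points (regions created = split keys + 1):
          // admin.createTable(desc, new byte[][] { Bytes.toBytes("m") });
        }
      }
    }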
It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async delete. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async delete. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future deleteTableAsync(TableName tableName) throws IOException; /** * Deletes tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(Pattern)} - * and {@link #deleteTable(org.apache.hadoop.hbase.TableName)} - * + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(Pattern)} and + * {@link #deleteTable(org.apache.hadoop.hbase.TableName)} * @param regex The regular expression to match table names against - * @return Table descriptors for tables that couldn't be deleted. - * The return htds are read-only + * @return Table descriptors for tables that couldn't be deleted. The return htds are read-only * @throws IOException if a remote or network exception occurs * @see #deleteTables(java.util.regex.Pattern) * @see #deleteTable(org.apache.hadoop.hbase.TableName) - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(Pattern)} - * and {@link #deleteTable(TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using {@link #listTableDescriptors(Pattern)} and + * {@link #deleteTable(TableName)} */ @Deprecated HTableDescriptor[] deleteTables(String regex) throws IOException; /** * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(java.util.regex.Pattern)} and + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and * {@link #deleteTable(org.apache.hadoop.hbase.TableName)} - * * @param pattern The pattern to match table names against - * @return Table descriptors for tables that couldn't be deleted - * The return htds are read-only + * @return Table descriptors for tables that couldn't be deleted The return htds are read-only * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(java.util.regex.Pattern)} - * and {@link #deleteTable(TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and + * {@link #deleteTable(TableName)} */ @Deprecated HTableDescriptor[] deleteTables(Pattern pattern) throws IOException; /** - * Truncate a table. - * Synchronous operation. - * + * Truncate a table. Synchronous operation. 
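deleteTables(String) and deleteTables(Pattern) are deprecated, and their javadoc now spells out the replacement: list the matching descriptors and delete each table yourself, disabling it first. A sketch of that loop, with an illustrative pattern:

    import java.io.IOException;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DeleteByPattern {
      // Replacement for the deprecated Admin#deleteTables(Pattern).
      static void deleteMatching(Admin admin, Pattern pattern) throws IOException {
        for (TableDescriptor td : admin.listTableDescriptors(pattern)) {
          TableName name = td.getTableName();
          if (admin.isTableEnabled(name)) {
            admin.disableTable(name); // a table must be disabled before deletion
          }
          admin.deleteTable(name);
        }
      }
    }

Usage: deleteMatching(admin, Pattern.compile("test_.*")).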
* @param tableName name of table to truncate * @param preserveSplits true if the splits should be preserved * @throws IOException if a remote or network exception occurs @@ -470,8 +445,7 @@ default void truncateTable(TableName tableName, boolean preserveSplits) throws I * @return the result of the async truncate. You can use Future.get(long, TimeUnit) to wait on the * operation to complete. */ - Future truncateTableAsync(TableName tableName, boolean preserveSplits) - throws IOException; + Future truncateTableAsync(TableName tableName, boolean preserveSplits) throws IOException; /** * Enable a table. May timeout. Use {@link #enableTableAsync(org.apache.hadoop.hbase.TableName)} @@ -490,67 +464,59 @@ default void enableTable(TableName tableName) throws IOException { } /** - * Enable the table but does not block and wait for it to be completely enabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Enable the table but does not block and wait for it to be completely enabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async enable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async enable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future enableTableAsync(TableName tableName) throws IOException; /** * Enable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(Pattern)} and {@link #enableTable(org.apache.hadoop.hbase.TableName)} - * + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(Pattern)} and + * {@link #enableTable(org.apache.hadoop.hbase.TableName)} * @param regex The regular expression to match table names against * @throws IOException if a remote or network exception occurs - * @return Table descriptors for tables that couldn't be enabled. - * The return HTDs are read-only. + * @return Table descriptors for tables that couldn't be enabled. The return HTDs are read-only. * @see #enableTables(java.util.regex.Pattern) * @see #enableTable(org.apache.hadoop.hbase.TableName) - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(Pattern)} - * and {@link #enableTable(org.apache.hadoop.hbase.TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using {@link #listTableDescriptors(Pattern)} and + * {@link #enableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] enableTables(String regex) throws IOException; /** * Enable tables matching the passed in pattern and wait on completion. 
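truncateTableAsync and the other *Async methods in this hunk share one contract: they return a Future immediately, and Future.get(long, TimeUnit) bounds the wait, surfacing ExecutionException on failure or TimeoutException when the wait is too short. A short sketch (the timeout is illustrative):

    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class TruncateExample {
      // Truncate a table without blocking indefinitely; the table has to be
      // disabled first, and preserveSplits=true keeps the region boundaries.
      static void truncatePreservingSplits(Admin admin, TableName name) throws Exception {
        if (admin.isTableEnabled(name)) {
          admin.disableTable(name);
        }
        Future<Void> f = admin.truncateTableAsync(name, true);
        f.get(5, TimeUnit.MINUTES); // ExecutionException / TimeoutException possible
      }
    }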
Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(java.util.regex.Pattern)} and + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and * {@link #enableTable(org.apache.hadoop.hbase.TableName)} - * * @param pattern The pattern to match table names against * @throws IOException if a remote or network exception occurs - * @return Table descriptors for tables that couldn't be enabled. - * The return HTDs are read-only. - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(java.util.regex.Pattern)} - * and {@link #enableTable(org.apache.hadoop.hbase.TableName)} + * @return Table descriptors for tables that couldn't be enabled. The return HTDs are read-only. + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and + * {@link #enableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] enableTables(Pattern pattern) throws IOException; /** - * Disable the table but does not block and wait for it to be completely disabled. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Disable the table but does not block and wait for it to be completely disabled. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table to delete * @throws IOException if a remote or network exception occurs - * @return the result of the async disable. You can use Future.get(long, TimeUnit) - * to wait on the operation to complete. + * @return the result of the async disable. You can use Future.get(long, TimeUnit) to wait on the + * operation to complete. */ Future disableTableAsync(TableName tableName) throws IOException; @@ -569,37 +535,33 @@ default void disableTable(TableName tableName) throws IOException { /** * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(Pattern)} and {@link #disableTable(org.apache.hadoop.hbase.TableName)} - * + * carefully, there is no prompting and the effect is immediate. 
Consider using + * {@link #listTableDescriptors(Pattern)} and + * {@link #disableTable(org.apache.hadoop.hbase.TableName)} * @param regex The regular expression to match table names against - * @return Table descriptors for tables that couldn't be disabled - * The return htds are read-only + * @return Table descriptors for tables that couldn't be disabled The return htds are read-only * @throws IOException if a remote or network exception occurs * @see #disableTables(java.util.regex.Pattern) * @see #disableTable(org.apache.hadoop.hbase.TableName) - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(Pattern)} - * and {@link #disableTable(org.apache.hadoop.hbase.TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using {@link #listTableDescriptors(Pattern)} and + * {@link #disableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] disableTables(String regex) throws IOException; /** * Disable tables matching the passed in pattern and wait on completion. Warning: Use this method - * carefully, there is no prompting and the effect is immediate. Consider using {@link - * #listTableDescriptors(java.util.regex.Pattern)} and + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and * {@link #disableTable(org.apache.hadoop.hbase.TableName)} - * * @param pattern The pattern to match table names against - * @return Table descriptors for tables that couldn't be disabled - * The return htds are read-only + * @return Table descriptors for tables that couldn't be disabled The return htds are read-only * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version - * This is just a trivial helper method without any magic. - * Consider using {@link #listTableDescriptors(java.util.regex.Pattern)} - * and {@link #disableTable(org.apache.hadoop.hbase.TableName)} + * @deprecated since 2.0 version and will be removed in 3.0 version This is just a trivial helper + * method without any magic. Consider using + * {@link #listTableDescriptors(java.util.regex.Pattern)} and + * {@link #disableTable(org.apache.hadoop.hbase.TableName)} */ @Deprecated HTableDescriptor[] disableTables(Pattern pattern) throws IOException; @@ -629,7 +591,6 @@ default void disableTable(TableName tableName) throws IOException { * Use this api to check if the table has been created with the specified number of splitkeys * which was used while creating the given table. Note : If this api is used after a table's * region gets splitted, the api may return false. - * * @param tableName name of table to check * @param splitKeys keys to check if the table has been created with all split keys * @throws IOException if a remote or network excpetion occurs @@ -641,13 +602,12 @@ default void disableTable(TableName tableName) throws IOException { /** * Get the status of an alter (a.k.a modify) command - indicates how * many regions have received the updated schema Asynchronous operation. 
- * * @param tableName TableName instance * @return Pair indicating the number of regions updated Pair.getFirst() is the regions that are - * yet to be updated Pair.getSecond() is the total number of regions of the table + * yet to be updated Pair.getSecond() is the total number of regions of the table * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future - * on an operation. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future on an + * operation. */ @Deprecated Pair getAlterStatus(TableName tableName) throws IOException; @@ -655,40 +615,36 @@ default void disableTable(TableName tableName) throws IOException { /** * Get the status of alter (a.k.a modify) command - indicates how many * regions have received the updated schema Asynchronous operation. - * * @param tableName name of the table to get the status of * @return Pair indicating the number of regions updated Pair.getFirst() is the regions that are - * yet to be updated Pair.getSecond() is the total number of regions of the table + * yet to be updated Pair.getSecond() is the total number of regions of the table * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future - * on an operation. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. No longer needed now you get a Future on an + * operation. */ @Deprecated Pair getAlterStatus(byte[] tableName) throws IOException; /** - * Add a column family to an existing table. Synchronous operation. - * Use {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * + * Add a column family to an existing table. Synchronous operation. Use + * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a + * {@link Future} from which you can learn whether success or failure. * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0. - * This will be removed in HBase 3.0.0. - * Use {@link #addColumnFamily(TableName, ColumnFamilyDescriptor)}. + * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0. Use + * {@link #addColumnFamily(TableName, ColumnFamilyDescriptor)}. */ @Deprecated default void addColumn(TableName tableName, ColumnFamilyDescriptor columnFamily) - throws IOException { + throws IOException { addColumnFamily(tableName, columnFamily); } /** - * Add a column family to an existing table. Synchronous operation. - * Use {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * + * Add a column family to an existing table. Synchronous operation. Use + * {@link #addColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns a + * {@link Future} from which you can learn whether success or failure. 
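addColumn is deprecated in favour of addColumnFamily/addColumnFamilyAsync, which take a ColumnFamilyDescriptor and, in the async form, return a Future to wait on. A minimal sketch; the family name and timeout are illustrative:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;

    public class AddFamilyExample {
      // Add a new column family and wait a bounded time for the schema change.
      static void addFamily(Admin admin, TableName table) throws Exception {
        admin.addColumnFamilyAsync(table, ColumnFamilyDescriptorBuilder.of("new_cf"))
          .get(2, TimeUnit.MINUTES);
      }
    }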
* @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs @@ -699,12 +655,10 @@ default void addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnF } /** - * Add a column family to an existing table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Add a column family to an existing table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added * @throws IOException if a remote or network exception occurs @@ -715,24 +669,22 @@ Future addColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor co throws IOException; /** - * Delete a column family from a table. Synchronous operation. - * Use {@link #deleteColumnFamily(TableName, byte[])} instead because it - * returns a {@link Future} from which you can learn whether success or failure. - * + * Delete a column family from a table. Synchronous operation. Use + * {@link #deleteColumnFamily(TableName, byte[])} instead because it returns a {@link Future} from + * which you can learn whether success or failure. * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0. - * This will be removed in HBase 3.0.0. - * Use {@link #deleteColumnFamily(TableName, byte[])}}. + * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0. Use + * {@link #deleteColumnFamily(TableName, byte[])}}. */ @Deprecated void deleteColumn(TableName tableName, byte[] columnFamily) throws IOException; /** - * Delete a column family from a table. Synchronous operation. - * Use {@link #deleteColumnFamily(TableName, byte[])} instead because it - * returns a {@link Future} from which you can learn whether success or failure. + * Delete a column family from a table. Synchronous operation. Use + * {@link #deleteColumnFamily(TableName, byte[])} instead because it returns a {@link Future} from + * which you can learn whether success or failure. * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs @@ -743,20 +695,17 @@ default void deleteColumnFamily(TableName tableName, byte[] columnFamily) throws } /** - * Delete a column family from a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Delete a column family from a table. Asynchronous operation. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. 
It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. * @param tableName name of table * @param columnFamily name of column family to be deleted * @throws IOException if a remote or network exception occurs * @return the result of the async delete column family. You can use Future.get(long, TimeUnit) to * wait on the operation to complete. */ - Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) - throws IOException; + Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) throws IOException; /** * Modify an existing column family on a table. Synchronous operation. Use @@ -765,9 +714,8 @@ Future deleteColumnFamilyAsync(TableName tableName, byte[] columnFamily) * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0. - * This will be removed in HBase 3.0.0. - * Use {@link #modifyColumnFamily(TableName, ColumnFamilyDescriptor)}. + * @deprecated As of release 2.0.0. This will be removed in HBase 3.0.0. Use + * {@link #modifyColumnFamily(TableName, ColumnFamilyDescriptor)}. */ @Deprecated default void modifyColumn(TableName tableName, ColumnFamilyDescriptor columnFamily) @@ -776,9 +724,9 @@ default void modifyColumn(TableName tableName, ColumnFamilyDescriptor columnFami } /** - * Modify an existing column family on a table. Synchronous operation. - * Use {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it - * returns a {@link Future} from which you can learn whether success or failure. + * Modify an existing column family on a table. Synchronous operation. Use + * {@link #modifyColumnFamilyAsync(TableName, ColumnFamilyDescriptor)} instead because it returns + * a {@link Future} from which you can learn whether success or failure. * @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs @@ -790,12 +738,10 @@ default void modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor colu } /** - * Modify an existing column family on a table. Asynchronous operation. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Modify an existing column family on a table. Asynchronous operation. You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. 
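modifyColumnFamilyAsync follows the same shape: build an updated ColumnFamilyDescriptor, submit it, and optionally block on the returned Future. The sketch below raises the number of versions kept for an existing family; the family name and values are illustrative:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ModifyFamilyExample {
      static void keepMoreVersions(Admin admin, TableName table) throws Exception {
        // Start from the current descriptor so unrelated settings are preserved.
        ColumnFamilyDescriptor current =
          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("cf"));
        ColumnFamilyDescriptor updated =
          ColumnFamilyDescriptorBuilder.newBuilder(current).setMaxVersions(5).build();
        admin.modifyColumnFamilyAsync(table, updated).get(2, TimeUnit.MINUTES);
      }
    }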
* @param tableName name of table * @param columnFamily new column family descriptor to use * @throws IOException if a remote or network exception occurs @@ -813,7 +759,7 @@ Future modifyColumnFamilyAsync(TableName tableName, ColumnFamilyDescriptor * @throws IOException if a remote or network exception occurs */ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, String dstSFT) - throws IOException { + throws IOException { get(modifyColumnFamilyStoreFileTrackerAsync(tableName, family, dstSFT), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -828,44 +774,41 @@ default void modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] fami * @throws IOException if a remote or network exception occurs */ Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family, - String dstSFT) throws IOException; + String dstSFT) throws IOException; /** * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param regionname region name to close * @param serverName Deprecated. Not used. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #unassign(byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated void closeRegion(String regionname, String serverName) throws IOException; /** * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param regionname region name to close * @param serverName Deprecated. Not used. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #unassign(byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated void closeRegion(byte[] regionname, String serverName) throws IOException; /** * Uses {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param encodedRegionName The encoded region name; i.e. the hash that makes up the region name - * suffix: e.g. if regionname is - * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., - * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. + * suffix: e.g. if regionname is + * TestTable,0094429456,1289497600452.527db22f95c8a9e0116f0cc13c680396., + * then the encoded region name is: 527db22f95c8a9e0116f0cc13c680396. * @param serverName Deprecated. Not used. * @return Deprecated. Returns true always. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #unassign(byte[], boolean)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #unassign(byte[], boolean)}. */ @Deprecated boolean closeRegionWithEncodedRegionName(String encodedRegionName, String serverName) @@ -873,12 +816,11 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server /** * Used {@link #unassign(byte[], boolean)} to unassign the region. For expert-admins. - * * @param sn Deprecated. Not used. * @throws IOException if a remote or network exception occurs * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-18231). - * Use {@link #unassign(byte[], boolean)}. + * (HBASE-18231). Use + * {@link #unassign(byte[], boolean)}. 
*/ @Deprecated void closeRegion(final ServerName sn, final HRegionInfo hri) throws IOException; @@ -887,15 +829,14 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server * Get all the online regions on a region server. * @throws IOException if a remote or network exception occurs * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-17980). - * Use {@link #getRegions(ServerName sn)}. + * (HBASE-17980). Use + * {@link #getRegions(ServerName sn)}. */ @Deprecated List getOnlineRegions(ServerName sn) throws IOException; /** * Get all the online regions on a region server. - * * @return List of {@link RegionInfo} * @throws IOException if a remote or network exception occurs */ @@ -903,16 +844,14 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server /** * Flush a table. Synchronous operation. - * * @param tableName table to flush * @throws IOException if a remote or network exception occurs */ void flush(TableName tableName) throws IOException; /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. - * + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. * @param tableName table to flush * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs @@ -921,7 +860,6 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server /** * Flush an individual region. Synchronous operation. - * * @param regionName region to flush * @throws IOException if a remote or network exception occurs */ @@ -929,7 +867,6 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server /** * Flush a column family within a region. Synchronous operation. - * * @param regionName region to flush * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs @@ -944,10 +881,8 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server void flushRegionServer(ServerName serverName) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). * @param tableName table to compact * @throws IOException if a remote or network exception occurs */ @@ -955,9 +890,8 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server /** * Compact an individual region. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compaction run and then it returns. It does not wait on the completion of Compaction (it can + * take a while). * @param regionName region to compact * @throws IOException if a remote or network exception occurs */ @@ -965,46 +899,39 @@ boolean closeRegionWithEncodedRegionName(String encodedRegionName, String server /** * Compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). 
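In this hunk flush is synchronous, compact/compactRegion only request that a compaction be scheduled, and getRegions(ServerName) replaces the deprecated getOnlineRegions. A short sketch tying them together; the server and table are supplied by the caller:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class FlushCompactExample {
      static void flushThenCompact(Admin admin, TableName table, ServerName server)
          throws IOException {
        admin.flush(table);   // synchronous: memstores are written out
        admin.compact(table); // request only; returns before compaction finishes
        List<RegionInfo> regions = admin.getRegions(server); // replaces getOnlineRegions
        for (RegionInfo region : regions) {
          System.out.println(region.getRegionNameAsString());
        }
      }
    }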
- * + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). * @param tableName table to compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void compact(TableName tableName, byte[] columnFamily) - throws IOException; + void compact(TableName tableName, byte[] columnFamily) throws IOException; /** * Compact a column family within a region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). * @param regionName region to compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void compactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void compactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Compact a table. Asynchronous operation in that this method requests that a Compaction run and + * then it returns. It does not wait on the completion of Compaction (it can take a while). * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ void compact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** - * Compact a column family within a table. Asynchronous operation in that this method - * requests that a Compaction run and then it returns. It does not wait on the - * completion of Compaction (it can take a while). - * + * Compact a column family within a table. Asynchronous operation in that this method requests + * that a Compaction run and then it returns. It does not wait on the completion of Compaction (it + * can take a while). * @param tableName table to compact * @param columnFamily column family within a table * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} @@ -1012,70 +939,62 @@ void compact(TableName tableName, CompactType compactType) * @throws InterruptedException */ void compact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** - * Major compact a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). * @param tableName table to major compact * @throws IOException if a remote or network exception occurs */ void majorCompact(TableName tableName) throws IOException; /** - * Major compact a table or an individual region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). 
- * + * Major compact a table or an individual region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param regionName region to major compact * @throws IOException if a remote or network exception occurs */ void majorCompactRegion(byte[] regionName) throws IOException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param tableName table to major compact * @param columnFamily column family within a table * @throws IOException if a remote or network exception occurs */ - void majorCompact(TableName tableName, byte[] columnFamily) - throws IOException; + void majorCompact(TableName tableName, byte[] columnFamily) throws IOException; /** - * Major compact a column family within region. Asynchronous operation in that this method requests - * that a Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a column family within region. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). * @param regionName egion to major compact * @param columnFamily column family within a region * @throws IOException if a remote or network exception occurs */ - void majorCompactRegion(byte[] regionName, byte[] columnFamily) - throws IOException; + void majorCompactRegion(byte[] regionName, byte[] columnFamily) throws IOException; /** - * Major compact a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a table. Asynchronous operation in that this method requests that a Compaction + * run and then it returns. It does not wait on the completion of Compaction (it can take a + * while). * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @throws IOException if a remote or network exception occurs * @throws InterruptedException */ void majorCompact(TableName tableName, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** - * Major compact a column family within a table. Asynchronous operation in that this method requests that a - * Compaction run and then it returns. It does not wait on the completion of Compaction - * (it can take a while). - * + * Major compact a column family within a table. Asynchronous operation in that this method + * requests that a Compaction run and then it returns. It does not wait on the completion of + * Compaction (it can take a while). 
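The majorCompact overloads carry the same request-only semantics, and the CompactType variants let callers target MOB storage as well. A sketch; the family name is illustrative and the MOB call is only meaningful on a MOB-enabled family:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MajorCompactExample {
      static void requestMajorCompactions(Admin admin, TableName table) throws Exception {
        admin.majorCompact(table);                      // whole table
        admin.majorCompact(table, Bytes.toBytes("cf")); // a single column family
        admin.majorCompact(table, CompactType.MOB);     // MOB data, if the table uses MOB
      }
    }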
* @param tableName table to compact * @param columnFamily column family within a table * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} @@ -1083,7 +1002,7 @@ void majorCompact(TableName tableName, CompactType compactType) * @throws InterruptedException */ void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactType) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** * Compact all regions on the region server. Asynchronous operation in that this method requests @@ -1098,8 +1017,8 @@ void majorCompact(TableName tableName, byte[] columnFamily, CompactType compactT * {@link #majorCompactRegionServer(ServerName)}. */ @Deprecated - default void compactRegionServer(ServerName sn, boolean major) throws IOException, - InterruptedException { + default void compactRegionServer(ServerName sn, boolean major) + throws IOException, InterruptedException { if (major) { majorCompactRegionServer(sn); } else { @@ -1109,11 +1028,10 @@ default void compactRegionServer(ServerName sn, boolean major) throws IOExceptio /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * - * @param switchState Set to true to enable, false to disable. + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. + * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers */ @@ -1159,8 +1077,8 @@ Map compactionSwitch(boolean switchState, List serv * startcode. Here is an example: host187.example.com,60020,1289493121758 * @throws IOException if we can't find a region named encodedRegionName * @deprecated since 2.2.0 and will be removed in 4.0.0. Use {@link #move(byte[], ServerName)} - * instead. And if you want to move the region to a random server, please use - * {@link #move(byte[])}. + * instead. And if you want to move the region to a random server, please use + * {@link #move(byte[])}. * @see HBASE-22108 */ @Deprecated @@ -1200,16 +1118,15 @@ default void move(byte[] encodedRegionName, byte[] destServerName) throws IOExce void unassign(byte[] regionName) throws IOException; /** - * Unassign a region from current hosting regionserver. Region will then be assigned to a - * regionserver chosen at random. Region could be reassigned back to the same server. Use {@link - * #move(byte[], ServerName)} if you want to control the region movement. - * + * Unassign a region from current hosting regionserver. Region will then be assigned to a + * regionserver chosen at random. Region could be reassigned back to the same server. Use + * {@link #move(byte[], ServerName)} if you want to control the region movement. * @param regionName Region to unassign. Will clear any existing RegionPlan if one found. - * @param force If true, force unassign (Will remove region from regions-in-transition too if - * present. If results in double assignment use hbck -fix to resolve. To be used by experts). + * @param force If true, force unassign (Will remove region from + * regions-in-transition too if present. 
If results in double assignment use hbck -fix to + * resolve. To be used by experts). * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. * @see HBASE-24875 */ @Deprecated @@ -1223,7 +1140,6 @@ default void unassign(byte[] regionName, boolean force) throws IOException { * still online as per Master's in memory state. If this API is incorrectly used on active region * then master will loose track of that region. This is a special method that should be used by * experts or hbck. - * * @param regionName Region to offline. * @throws IOException if a remote or network exception occurs */ @@ -1231,13 +1147,12 @@ default void unassign(byte[] regionName, boolean force) throws IOException { /** * Turn the load balancer on or off. - * - * @param synchronous If true, it waits until current balance() call, if - * outstanding, to return. + * @param synchronous If true, it waits until current balance() call, if outstanding, + * to return. * @return Previous balancer value * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #balancerSwitch(boolean, boolean)} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use + * {@link #balancerSwitch(boolean, boolean)} instead. */ @Deprecated default boolean setBalancerRunning(boolean on, boolean synchronous) throws IOException { @@ -1247,22 +1162,19 @@ default boolean setBalancerRunning(boolean on, boolean synchronous) throws IOExc /** * Turn the load balancer on or off. * @param onOrOff Set to true to enable, false to disable. - * @param synchronous If true, it waits until current balance() call, if - * outstanding, to return. + * @param synchronous If true, it waits until current balance() call, if outstanding, + * to return. * @return Previous balancer value * @throws IOException if a remote or network exception occurs */ - boolean balancerSwitch(boolean onOrOff, boolean synchronous) - throws IOException; + boolean balancerSwitch(boolean onOrOff, boolean synchronous) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the - * reassignments. Can NOT run for various reasons. Check logs. - * + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. Can NOT run for various reasons. Check logs. * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #balance()} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #balance()} instead. */ @Deprecated default boolean balancer() throws IOException { @@ -1270,21 +1182,18 @@ default boolean balancer() throws IOException { } /** - * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the - * reassignments. Can NOT run for various reasons. Check logs. - * + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. Can NOT run for various reasons. Check logs. * @return true if balancer ran, false otherwise. 
* @throws IOException if a remote or network exception occurs */ default boolean balance() throws IOException { - return balance(BalanceRequest.defaultInstance()) - .isBalancerRan(); + return balance(BalanceRequest.defaultInstance()).isBalancerRan(); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. * @throws IOException if a remote or network exception occurs @@ -1292,15 +1201,14 @@ default boolean balance() throws IOException { BalanceResponse balance(BalanceRequest request) throws IOException; /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. If there is region in transition, force parameter of true - * would still run balancer. Can *not* run for other reasons. Check - * logs. + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. If there is region in transition, force parameter of true would still run + * balancer. Can *not* run for other reasons. Check logs. * @param force whether we should force balance even if there is region in transition * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ @Deprecated default boolean balancer(boolean force) throws IOException { @@ -1308,39 +1216,33 @@ default boolean balancer(boolean force) throws IOException { } /** - * Invoke the balancer. Will run the balancer and if regions to move, it will - * go ahead and do the reassignments. If there is region in transition, force parameter of true - * would still run balancer. Can *not* run for other reasons. Check - * logs. + * Invoke the balancer. Will run the balancer and if regions to move, it will go ahead and do the + * reassignments. If there is region in transition, force parameter of true would still run + * balancer. Can *not* run for other reasons. Check logs. * @param force whether we should force balance even if there is region in transition * @return true if balancer ran, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ @Deprecated default boolean balance(boolean force) throws IOException { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(force) - .build() - ).isBalancerRan(); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(force).build()) + .isBalancerRan(); } /** * Query the current state of the balancer. - * * @return true if the balancer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ boolean isBalancerEnabled() throws IOException; /** - * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. 
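For illustration only (not part of this patch): a minimal sketch of driving the balancer through the Admin calls reformatted above, assuming an already-open Connection named conn and IOException handling in the caller.

  try (Admin admin = conn.getAdmin()) {
    boolean previouslyEnabled = admin.balancerSwitch(true, true); // enable; wait for any in-flight balance()
    BalanceResponse response = admin.balance(BalanceRequest.defaultInstance());
    if (response.isBalancerRan()) {
      // region plans were submitted; the master log shows which regions actually moved
    }
  }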
- * Calling this API will drop all the cached blocks specific to a table from BlockCache. - * This can significantly impact the query performance as the subsequent queries will - * have to retrieve the blocks from underlying filesystem. - * + * Clear all the blocks corresponding to this table from BlockCache. For expert-admins. Calling + * this API will drop all the cached blocks specific to a table from BlockCache. This can + * significantly impact the query performance as the subsequent queries will have to retrieve the + * blocks from underlying filesystem. * @param tableName table to clear block cache * @return CacheEvictionStats related to the eviction * @throws IOException if a remote or network exception occurs @@ -1348,11 +1250,9 @@ default boolean balance(boolean force) throws IOException { CacheEvictionStats clearBlockCache(final TableName tableName) throws IOException; /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs */ @@ -1361,11 +1261,9 @@ default boolean normalize() throws IOException { } /** - * Invoke region normalizer. Can NOT run for various reasons. Check logs. - * This is a non-blocking invocation to region normalizer. If return value is true, it means - * the request was submitted successfully. We need to check logs for the details of which regions - * were split/merged. - * + * Invoke region normalizer. Can NOT run for various reasons. Check logs. This is a non-blocking + * invocation to region normalizer. If return value is true, it means the request was submitted + * successfully. We need to check logs for the details of which regions were split/merged. * @param ntfp limit to tables matching the specified filter. * @return {@code true} if region normalizer ran, {@code false} otherwise. * @throws IOException if a remote or network exception occurs @@ -1374,7 +1272,6 @@ default boolean normalize() throws IOException { /** * Query the current state of the region normalizer. - * * @return true if region normalizer is enabled, false otherwise. * @throws IOException if a remote or network exception occurs */ @@ -1382,11 +1279,10 @@ default boolean normalize() throws IOException { /** * Turn region normalizer on or off. - * * @return Previous normalizer value * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #normalizerSwitch(boolean)}} - * instead. + * instead. */ @Deprecated default boolean setNormalizerRunning(boolean on) throws IOException { @@ -1395,20 +1291,18 @@ default boolean setNormalizerRunning(boolean on) throws IOException { /** * Turn region normalizer on or off. - * * @return Previous normalizer value * @throws IOException if a remote or network exception occurs */ - boolean normalizerSwitch (boolean on) throws IOException; + boolean normalizerSwitch(boolean on) throws IOException; /** * Enable/Disable the catalog janitor. 
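For illustration only (not part of this patch): a short sketch of the normalizer switch and the non-blocking normalize() call documented above; admin is an assumed open org.apache.hadoop.hbase.client.Admin handle.

  boolean previous = admin.normalizerSwitch(true);  // turn the region normalizer on
  if (admin.isNormalizerEnabled()) {
    boolean submitted = admin.normalize();          // non-blocking; splits/merges are reported in the master log
  }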
- * * @param enable if true enables the catalog janitor * @return the previous state * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #catalogJanitorSwitch(boolean)}} - * instead. + * instead. */ @Deprecated default boolean enableCatalogJanitor(boolean enable) throws IOException { @@ -1417,7 +1311,6 @@ default boolean enableCatalogJanitor(boolean enable) throws IOException { /** * Enable/Disable the catalog janitor/ - * * @param onOrOff if true enables the catalog janitor * @return the previous state * @throws IOException if a remote or network exception occurs @@ -1426,11 +1319,9 @@ default boolean enableCatalogJanitor(boolean enable) throws IOException { /** * Ask for a scan of the catalog table. - * * @return the number of entries cleaned. Returns -1 if previous run is in progress. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #runCatalogJanitor()}} - * instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #runCatalogJanitor()}} instead. */ @Deprecated default int runCatalogScan() throws IOException { @@ -1439,7 +1330,6 @@ default int runCatalogScan() throws IOException { /** * Ask for a scan of the catalog table. - * * @return the number of entries cleaned * @throws IOException if a remote or network exception occurs */ @@ -1447,19 +1337,17 @@ default int runCatalogScan() throws IOException { /** * Query on the catalog janitor state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCatalogJanitorEnabled() throws IOException; /** * Enable/Disable the cleaner chore. - * * @param on if true enables the cleaner chore * @return the previous state * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #cleanerChoreSwitch(boolean)}} - * instead. + * instead. */ @Deprecated default boolean setCleanerChoreRunning(boolean on) throws IOException { @@ -1468,7 +1356,6 @@ default boolean setCleanerChoreRunning(boolean on) throws IOException { /** * Enable/Disable the cleaner chore. - * * @param onOrOff if true enables the cleaner chore * @return the previous state * @throws IOException if a remote or network exception occurs @@ -1477,7 +1364,6 @@ default boolean setCleanerChoreRunning(boolean on) throws IOException { /** * Ask for cleaner chore to run. - * * @return true if cleaner chore ran, false otherwise * @throws IOException if a remote or network exception occurs */ @@ -1485,25 +1371,23 @@ default boolean setCleanerChoreRunning(boolean on) throws IOException { /** * Query on the cleaner chore state (Enabled/Disabled?). - * * @throws IOException if a remote or network exception occurs */ boolean isCleanerChoreEnabled() throws IOException; /** * Merge two regions. Asynchronous operation. - * * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b * @param forcible true if do a compulsory merge, otherwise we will only merge two - * adjacent regions + * adjacent regions * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0. Will be removed in 3.0. Use - * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. + * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. 
*/ @Deprecated - void mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, - boolean forcible) throws IOException; + void mergeRegions(byte[] nameOfRegionA, byte[] nameOfRegionB, boolean forcible) + throws IOException; /** * Merge two regions. Asynchronous operation. @@ -1535,8 +1419,8 @@ Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) throws IOException; /** - * Split a table. The method will execute split action for each region in table. - * Asynchronous operation. + * Split a table. The method will execute split action for each region in table. Asynchronous + * operation. * @param tableName table to split * @throws IOException if a remote or network exception occurs */ @@ -1544,18 +1428,16 @@ Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) /** * Split an individual region. Asynchronous operation. - * * @param regionName region to split * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #splitRegionAsync(byte[], byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #splitRegionAsync(byte[], byte[])}. */ @Deprecated void splitRegion(byte[] regionName) throws IOException; /** * Split a table. Asynchronous operation. - * * @param tableName table to split * @param splitPoint the explicit position to split on * @throws IOException if a remote or network exception occurs @@ -1564,16 +1446,14 @@ Future mergeRegionsAsync(byte[][] nameofRegionsToMerge, boolean forcible) /** * Split an individual region. Asynchronous operation. - * * @param regionName region to split * @param splitPoint the explicit position to split on * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #splitRegionAsync(byte[], byte[])}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #splitRegionAsync(byte[], byte[])}. */ @Deprecated - void splitRegion(byte[] regionName, byte[] splitPoint) - throws IOException; + void splitRegion(byte[] regionName, byte[] splitPoint) throws IOException; /** * Split an individual region. Asynchronous operation. @@ -1601,8 +1481,8 @@ void splitRegion(byte[] regionName, byte[] splitPoint) @Deprecated default void modifyTable(TableName tableName, TableDescriptor td) throws IOException { if (!tableName.equals(td.getTableName())) { - throw new IllegalArgumentException("the specified table name '" + tableName + - "' doesn't match with the HTD one: " + td.getTableName()); + throw new IllegalArgumentException("the specified table name '" + tableName + + "' doesn't match with the HTD one: " + td.getTableName()); } modifyTable(td); } @@ -1617,39 +1497,35 @@ default void modifyTable(TableDescriptor td) throws IOException { } /** - * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that - * it may be a while before your schema change is updated across all of the table. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Modify an existing table, more IRB friendly version. Asynchronous operation. This means that it + * may be a while before your schema change is updated across all of the table. 
You can use + * Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param tableName name of table. * @param td modified description of the table * @throws IOException if a remote or network exception occurs * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the - * operation to complete - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #modifyTableAsync(TableDescriptor)} + * operation to complete + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #modifyTableAsync(TableDescriptor)} */ @Deprecated default Future modifyTableAsync(TableName tableName, TableDescriptor td) throws IOException { if (!tableName.equals(td.getTableName())) { - throw new IllegalArgumentException("the specified table name '" + tableName + - "' doesn't match with the HTD one: " + td.getTableName()); + throw new IllegalArgumentException("the specified table name '" + tableName + + "' doesn't match with the HTD one: " + td.getTableName()); } return modifyTableAsync(td); } /** - * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means that - * it may be a while before your schema change is updated across all of the table. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * + * Modify an existing table, more IRB (ruby) friendly version. Asynchronous operation. This means + * that it may be a while before your schema change is updated across all of the table. You can + * use Future.get(long, TimeUnit) to wait on the operation to complete. It may throw + * ExecutionException if there was an error while executing the operation or TimeoutException in + * case the wait timeout was not long enough to allow the operation to complete. * @param td description of the table * @throws IOException if a remote or network exception occurs * @return the result of the async modify. You can use Future.get(long, TimeUnit) to wait on the @@ -1677,7 +1553,7 @@ default void modifyTableStoreFileTracker(TableName tableName, String dstSFT) thr * @throws IOException if a remote or network exception occurs */ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT) - throws IOException; + throws IOException; /** * Shuts down the HBase cluster. @@ -1700,22 +1576,21 @@ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT /** * Check whether Master is in maintenance mode. - * * @throws IOException if a remote or network exception occurs */ - boolean isMasterInMaintenanceMode() throws IOException; + boolean isMasterInMaintenanceMode() throws IOException; /** * Stop the designated regionserver. - * * @param hostnamePort Hostname and port delimited by a : as in - * example.org:1234 + * example.org:1234 * @throws IOException if a remote or network exception occurs */ void stopRegionServer(String hostnamePort) throws IOException; /** * Get whole cluster status, containing status about: + * *

        * hbase version
        * cluster id
    @@ -1725,10 +1600,11 @@ Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT
        * balancer
        * regions in transition
        * 
    + * * @return cluster status * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #getClusterMetrics()} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #getClusterMetrics()} */ @Deprecated default ClusterStatus getClusterStatus() throws IOException { @@ -1737,6 +1613,7 @@ default ClusterStatus getClusterStatus() throws IOException { /** * Get whole cluster metrics, containing status about: + * *
        * hbase version
        * cluster id
    @@ -1746,6 +1623,7 @@ default ClusterStatus getClusterStatus() throws IOException {
        * balancer
        * regions in transition
        * 
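For illustration only (not part of this patch): a hypothetical sketch of requesting a subset of the cluster metrics listed above; admin is an assumed open Admin handle.

  ClusterMetrics metrics = admin.getClusterMetrics(
      EnumSet.of(ClusterMetrics.Option.HBASE_VERSION, ClusterMetrics.Option.LIVE_SERVERS));
  String hbaseVersion = metrics.getHBaseVersion();
  int liveRegionServers = metrics.getLiveServerMetrics().size();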
    + * * @return cluster metrics * @throws IOException if a remote or network exception occurs */ @@ -1785,9 +1663,8 @@ default Collection getRegionServers() throws IOException { } /** - * Retrieve all current live region servers including decommissioned - * if excludeDecommissionedRS is false, else non-decommissioned ones only - * + * Retrieve all current live region servers including decommissioned if excludeDecommissionedRS is + * false, else non-decommissioned ones only * @param excludeDecommissionedRS should we exclude decommissioned RS nodes * @return all current live region servers including/excluding decommissioned hosts * @throws IOException if a remote or network exception occurs @@ -1795,19 +1672,17 @@ default Collection getRegionServers() throws IOException { default Collection getRegionServers(boolean excludeDecommissionedRS) throws IOException { List allServers = - getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName(); + getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).getServersName(); if (!excludeDecommissionedRS) { return allServers; } List decommissionedRegionServers = listDecommissionedRegionServers(); - return allServers.stream() - .filter(s -> !decommissionedRegionServers.contains(s)) - .collect(ImmutableList.toImmutableList()); + return allServers.stream().filter(s -> !decommissionedRegionServers.contains(s)) + .collect(ImmutableList.toImmutableList()); } /** * Get {@link RegionMetrics} of all regions hosted on a regionserver. - * * @param serverName region server from which {@link RegionMetrics} is required. * @return a {@link RegionMetrics} list of all regions hosted on a region server * @throws IOException if a remote or network exception occurs @@ -1818,14 +1693,13 @@ default List getRegionMetrics(ServerName serverName) throws IOExc /** * Get {@link RegionMetrics} of all regions hosted on a regionserver for a table. - * * @param serverName region server from which {@link RegionMetrics} is required. * @param tableName get {@link RegionMetrics} of regions belonging to the table * @return region metrics map of all regions of a table hosted on a region server * @throws IOException if a remote or network exception occurs */ - List getRegionMetrics(ServerName serverName, - TableName tableName) throws IOException; + List getRegionMetrics(ServerName serverName, TableName tableName) + throws IOException; /** * @return Configuration used by the instance. @@ -1901,7 +1775,6 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * List available namespaces - * * @return List of namespace names * @throws IOException if a remote or network exception occurs */ @@ -1909,25 +1782,21 @@ NamespaceDescriptor getNamespaceDescriptor(String name) /** * List available namespace descriptors - * * @return List of descriptors * @throws IOException if a remote or network exception occurs */ - NamespaceDescriptor[] listNamespaceDescriptors() - throws IOException; + NamespaceDescriptor[] listNamespaceDescriptors() throws IOException; /** * Get list of table descriptors by namespace. - * * @param name namespace name * @return HTD[] the read-only tableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #listTableDescriptorsByNamespace(byte[])} + * @deprecated since 2.0 version and will be removed in 3.0 version. 
use + * {@link #listTableDescriptorsByNamespace(byte[])} */ @Deprecated - HTableDescriptor[] listTableDescriptorsByNamespace(String name) - throws IOException; + HTableDescriptor[] listTableDescriptorsByNamespace(String name) throws IOException; /** * Get list of table descriptors by namespace. @@ -1947,21 +1816,18 @@ HTableDescriptor[] listTableDescriptorsByNamespace(String name) /** * Get the regions of a given table. - * * @param tableName the name of the table * @return List of {@link HRegionInfo}. * @throws IOException if a remote or network exception occurs * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * (HBASE-17980). - * Use {@link #getRegions(TableName)}. + * (HBASE-17980). Use + * {@link #getRegions(TableName)}. */ @Deprecated - List getTableRegions(TableName tableName) - throws IOException; + List getTableRegions(TableName tableName) throws IOException; /** * Get the regions of a given table. - * * @param tableName the name of the table * @return List of {@link RegionInfo}. * @throws IOException if a remote or network exception occurs @@ -1973,39 +1839,33 @@ List getTableRegions(TableName tableName) /** * Get tableDescriptors. - * * @param tableNames List of table names * @return HTD[] the read-only tableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #listTableDescriptors(List)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #listTableDescriptors(List)} */ @Deprecated - HTableDescriptor[] getTableDescriptorsByTableName(List tableNames) - throws IOException; + HTableDescriptor[] getTableDescriptorsByTableName(List tableNames) throws IOException; /** * Get tableDescriptors. - * * @param tableNames List of table names * @return returns a list of TableDescriptors * @throws IOException if a remote or network exception occurs */ - List listTableDescriptors(List tableNames) - throws IOException; + List listTableDescriptors(List tableNames) throws IOException; /** * Get tableDescriptors. - * * @param names List of table names * @return HTD[] the read-only tableDescriptors * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #listTableDescriptors(List)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #listTableDescriptors(List)} */ @Deprecated - HTableDescriptor[] getTableDescriptors(List names) - throws IOException; + HTableDescriptor[] getTableDescriptors(List names) throws IOException; /** * Abort a procedure. @@ -2026,24 +1886,22 @@ default boolean abortProcedure(long procId, boolean mayInterruptIfRunning) throw } /** - * Abort a procedure but does not block and wait for completion. - * You can use Future.get(long, TimeUnit) to wait on the operation to complete. - * It may throw ExecutionException if there was an error while executing the operation - * or TimeoutException in case the wait timeout was not long enough to allow the - * operation to complete. - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. - * + * Abort a procedure but does not block and wait for completion. You can use Future.get(long, + * TimeUnit) to wait on the operation to complete. 
It may throw ExecutionException if there was an + * error while executing the operation or TimeoutException in case the wait timeout was not long + * enough to allow the operation to complete. Do not use. Usually it is ignored but if not, it can + * do more damage than good. See hbck2. * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? - * @return true if aborted, false if procedure already completed or does not exist + * @return true if aborted, false if procedure already completed or does + * not exist * @throws IOException if a remote or network exception occurs * @deprecated since 2.1.1 and will be removed in 4.0.0. * @see HBASE-21223 */ @Deprecated - Future abortProcedureAsync( - long procId, - boolean mayInterruptIfRunning) throws IOException; + Future abortProcedureAsync(long procId, boolean mayInterruptIfRunning) + throws IOException; /** * Get procedures. @@ -2061,11 +1919,9 @@ Future abortProcedureAsync( /** * Roll the log writer. I.e. for filesystem based write ahead logs, start writing to a new file. - * * Note that the actual rolling of the log writer is asynchronous and may not be complete when - * this method returns. As a side effect of this call, the named region server may schedule - * store flushes at the request of the wal. - * + * this method returns. As a side effect of this call, the named region server may schedule store + * flushes at the request of the wal. * @param serverName The servername of the regionserver. * @throws IOException if a remote or network exception occurs * @throws org.apache.hadoop.hbase.regionserver.wal.FailedLogCloseException @@ -2077,8 +1933,8 @@ Future abortProcedureAsync( * @return an array of master coprocessors * @throws IOException if a remote or network exception occurs * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames() - * @deprecated since 2.0 version and will be removed in 3.0 version. - * use {@link #getMasterCoprocessorNames()} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #getMasterCoprocessorNames()} */ @Deprecated default String[] getMasterCoprocessors() throws IOException { @@ -2092,14 +1948,12 @@ default String[] getMasterCoprocessors() throws IOException { * @see org.apache.hadoop.hbase.ClusterMetrics#getMasterCoprocessorNames() */ default List getMasterCoprocessorNames() throws IOException { - return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)) - .getMasterCoprocessorNames(); + return getClusterMetrics(EnumSet.of(Option.MASTER_COPROCESSORS)).getMasterCoprocessorNames(); } /** * Get the current compaction state of a table. It could be in a major compaction, a minor * compaction, both, or none. - * * @param tableName table to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -2108,19 +1962,17 @@ default List getMasterCoprocessorNames() throws IOException { /** * Get the current compaction state of a table. It could be in a compaction, or none. 
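For illustration only (not part of this patch): a sketch of checking a table's compaction state before requesting a major compaction, per the methods above; admin and the table name are assumptions.

  TableName tn = TableName.valueOf("usertable");
  CompactionState state = admin.getCompactionState(tn); // NONE, MINOR, MAJOR or MAJOR_AND_MINOR
  if (state == CompactionState.NONE) {
    admin.majorCompact(tn); // asynchronous request; progress is visible via getCompactionState
  }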
- * * @param tableName table to examine * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @return the current compaction state * @throws IOException if a remote or network exception occurs */ - CompactionState getCompactionState(TableName tableName, - CompactType compactType) throws IOException; + CompactionState getCompactionState(TableName tableName, CompactType compactType) + throws IOException; /** * Get the current compaction state of region. It could be in a major compaction, a minor * compaction, both, or none. - * * @param regionName region to examine * @return the current compaction state * @throws IOException if a remote or network exception occurs @@ -2128,11 +1980,8 @@ CompactionState getCompactionState(TableName tableName, CompactionState getCompactionStateForRegion(byte[] regionName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed table - * - * The timestamp of the oldest HFile resulting from a major compaction of that table, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed table The timestamp of the oldest + * HFile resulting from a major compaction of that table, or 0 if no such HFile could be found. * @param tableName table to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -2140,11 +1989,9 @@ CompactionState getCompactionState(TableName tableName, long getLastMajorCompactionTimestamp(TableName tableName) throws IOException; /** - * Get the timestamp of the last major compaction for the passed region. - * - * The timestamp of the oldest HFile resulting from a major compaction of that region, - * or 0 if no such HFile could be found. - * + * Get the timestamp of the last major compaction for the passed region. The timestamp of the + * oldest HFile resulting from a major compaction of that region, or 0 if no such HFile could be + * found. * @param regionName region to examine * @return the last major compaction timestamp or 0 * @throws IOException if a remote or network exception occurs @@ -2211,48 +2058,45 @@ default void snapshot(String snapshotName, TableName tableName, SnapshotType typ /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * - * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot - * @param type type of snapshot to take + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. 
Snapshot can + * live with ttl seconds. + * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other + * snapshots stored on the cluster + * @param tableName name of the table to snapshot + * @param type type of snapshot to take * @param snapshotProps snapshot additional properties e.g. TTL - * @throws IOException we fail to reach the master + * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ default void snapshot(String snapshotName, TableName tableName, SnapshotType type, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, type, snapshotProps)); } /** * Create typed snapshot of the table. Snapshots are considered unique based on the name of the - * snapshot. Snapshots are taken sequentially even when requested concurrently, across - * all tables. Attempts to take a snapshot with the same name (even a different type or with - * different parameters) will fail with a {@link SnapshotCreationException} indicating the - * duplicate naming. Snapshot names follow the same naming constraints as tables in HBase. See - * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. - * Snapshot can live with ttl seconds. - * - * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other - * snapshots stored on the cluster - * @param tableName name of the table to snapshot + * snapshot. Snapshots are taken sequentially even when requested concurrently, across all + * tables. Attempts to take a snapshot with the same name (even a different type or with different + * parameters) will fail with a {@link SnapshotCreationException} indicating the duplicate naming. + * Snapshot names follow the same naming constraints as tables in HBase. See + * {@link org.apache.hadoop.hbase.TableName#isLegalFullyQualifiedTableName(byte[])}. Snapshot can + * live with ttl seconds. + * @param snapshotName name to give the snapshot on the filesystem. Must be unique from all other + * snapshots stored on the cluster + * @param tableName name of the table to snapshot * @param snapshotProps snapshot additional properties e.g. 
TTL - * @throws IOException we fail to reach the master + * @throws IOException we fail to reach the master * @throws SnapshotCreationException if snapshot creation failed - * @throws IllegalArgumentException if the snapshot request is formatted incorrectly + * @throws IllegalArgumentException if the snapshot request is formatted incorrectly */ - default void snapshot(String snapshotName, TableName tableName, - Map snapshotProps) throws IOException, - SnapshotCreationException, IllegalArgumentException { + default void snapshot(String snapshotName, TableName tableName, Map snapshotProps) + throws IOException, SnapshotCreationException, IllegalArgumentException { snapshot(new SnapshotDescription(snapshotName, tableName, SnapshotType.FLUSH, snapshotProps)); } @@ -2278,13 +2122,12 @@ void snapshot(SnapshotDescription snapshot) /** * Take a snapshot without waiting for the server to complete that snapshot (asynchronous) Only a * single snapshot should be taken at a time, or results may be undefined. - * * @param snapshot snapshot to take * @throws IOException if the snapshot did not succeed or we lose contact with the master. * @throws SnapshotCreationException if snapshot creation failed * @throws IllegalArgumentException if the snapshot request is formatted incorrectly * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #snapshotAsync(SnapshotDescription)} instead. + * {@link #snapshotAsync(SnapshotDescription)} instead. */ @Deprecated @SuppressWarnings("FutureReturnValueIgnored") @@ -2305,20 +2148,22 @@ Future snapshotAsync(SnapshotDescription snapshot) throws IOException, SnapshotCreationException; /** - * Check the current state of the passed snapshot. There are three possible states:
      - *
      - * 1. running - returns false
      - * 2. finished - returns true
      - * 3. finished with error - throws the exception that caused the snapshot to fail
    The - * cluster only knows about the most recent snapshot. Therefore, if another snapshot has been - * run/started since the snapshot you are checking, you will receive an {@link - * org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}. - * + * Check the current state of the passed snapshot. There are three possible states: + *
      + *
      + * 1. running - returns false
      + * 2. finished - returns true
      + * 3. finished with error - throws the exception that caused the snapshot to fail
      + *
    + * The cluster only knows about the most recent snapshot. Therefore, if another snapshot has been + * run/started since the snapshot you are checking, you will receive an + * {@link org.apache.hadoop.hbase.snapshot.UnknownSnapshotException}. * @param snapshot description of the snapshot to check * @return true if the snapshot is completed, false if the snapshot is still - * running + * running * @throws IOException if we have a network issue * @throws org.apache.hadoop.hbase.snapshot.HBaseSnapshotException if the snapshot failed * @throws org.apache.hadoop.hbase.snapshot.UnknownSnapshotException if the requested snapshot is - * unknown + * unknown */ boolean isSnapshotFinished(SnapshotDescription snapshot) throws IOException, HBaseSnapshotException, UnknownSnapshotException; @@ -2426,8 +2271,8 @@ default void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot) * @throws RestoreSnapshotException if snapshot failed to be restored * @throws IllegalArgumentException if the restore request is formatted incorrectly */ - void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) throws IOException, RestoreSnapshotException; + void restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, boolean restoreAcl) + throws IOException, RestoreSnapshotException; /** * Create a new table by cloning the snapshot content. @@ -2472,8 +2317,7 @@ default void cloneSnapshot(String snapshotName, TableName tableName) * @throws IllegalArgumentException if the specified table has not a valid name */ default void cloneSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, - String customSFT) - throws IOException, TableExistsException, RestoreSnapshotException { + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException { get(cloneSnapshotAsync(snapshotName, tableName, restoreAcl, customSFT), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } @@ -2523,8 +2367,7 @@ default Future cloneSnapshotAsync(String snapshotName, TableName tableName * @throws IllegalArgumentException if the specified table has not a valid name */ default Future cloneSnapshotAsync(String snapshotName, TableName tableName, - boolean restoreAcl) - throws IOException, TableExistsException, RestoreSnapshotException { + boolean restoreAcl) throws IOException, TableExistsException, RestoreSnapshotException { return cloneSnapshotAsync(snapshotName, tableName, restoreAcl, null); } @@ -2540,15 +2383,14 @@ default Future cloneSnapshotAsync(String snapshotName, TableName tableName * @throws IllegalArgumentException if the specified table has not a valid name */ Future cloneSnapshotAsync(String snapshotName, TableName tableName, boolean restoreAcl, - String customSFT) throws IOException, TableExistsException, RestoreSnapshotException; + String customSFT) throws IOException, TableExistsException, RestoreSnapshotException; /** * Execute a distributed procedure on a cluster. - * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). + * root ZK node name of the procedure). * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. + * optional. * @param props Property/Value pairs of properties passing to the procedure * @throws IOException if a remote or network exception occurs */ @@ -2557,16 +2399,15 @@ void execProcedure(String signature, String instance, Map props) /** * Execute a distributed procedure on a cluster. 
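For illustration only (not part of this patch): a sketch of the snapshot calls touched in the hunks above; admin is an assumed open Admin handle, and the "TTL" property key with a value in seconds is an assumption.

  Map<String, Object> snapshotProps = new HashMap<>();
  snapshotProps.put("TTL", 86400L); // assumed key/unit: keep the snapshot for one day
  admin.snapshot("usertable-snap", TableName.valueOf("usertable"), SnapshotType.FLUSH, snapshotProps);
  admin.cloneSnapshot("usertable-snap", TableName.valueOf("usertable_copy"));
  // To roll the original table back instead: disable it first, then
  // admin.restoreSnapshot("usertable-snap", true); // true = keep a failsafe snapshot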
- * * @param signature A distributed procedure is uniquely identified by its signature (default the - * root ZK node name of the procedure). + * root ZK node name of the procedure). * @param instance The instance name of the procedure. For some procedures, this parameter is - * optional. + * optional. * @param props Property/Value pairs of properties passing to the procedure * @return data returned after procedure execution. null if no return data. * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #execProcedureWithReturn(String, String, Map)} } instead. + * {@link #execProcedureWithReturn(String, String, Map)} } instead. */ @Deprecated default byte[] execProcedureWithRet(String signature, String instance, Map props) @@ -2588,14 +2429,17 @@ byte[] execProcedureWithReturn(String signature, String instance, Map - *
  - * 1. running - returns false
  - * 2. finished - returns true
  - * 3. finished with error - throws the exception that caused the procedure to fail
  • - * + * Check the current state of the specified procedure. There are three possible states: + *
      + *
      + * 1. running - returns false
      + * 2. finished - returns true
      + * 3. finished with error - throws the exception that caused the procedure to fail
      + *
    * @param signature The signature that uniquely identifies a procedure * @param instance The instance name of the procedure * @param props Property/Value pairs of properties passing to the procedure - * @return true if the specified procedure is finished successfully, false if it is still running + * @return true if the specified procedure is finished successfully, + * false if it is still running * @throws IOException if the specified procedure finished with error */ boolean isProcedureFinished(String signature, String instance, Map props) @@ -2603,7 +2447,6 @@ boolean isProcedureFinished(String signature, String instance, Map listSnapshots(String regex) throws IOException; /** * List all the completed snapshots matching the given pattern. - * * @param pattern The compiled regular expression to match against * @return list of SnapshotDescription * @throws IOException if a remote or network exception occurs @@ -2637,12 +2478,12 @@ boolean isProcedureFinished(String signature, String instance, Map listTableSnapshots(String tableNameRegex, - String snapshotNameRegex) throws IOException; + List listTableSnapshots(String tableNameRegex, String snapshotNameRegex) + throws IOException; /** * List all the completed snapshots matching the given table name regular expression and snapshot @@ -2657,7 +2498,6 @@ List listTableSnapshots(Pattern tableNamePattern, /** * Delete an existing snapshot. - * * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs * @deprecated Since 2.2.0. Will be removed in 3.0.0. Use {@link #deleteSnapshot(String)} instead. @@ -2667,7 +2507,6 @@ List listTableSnapshots(Pattern tableNamePattern, /** * Delete an existing snapshot. - * * @param snapshotName name of the snapshot * @throws IOException if a remote or network exception occurs */ @@ -2675,18 +2514,16 @@ List listTableSnapshots(Pattern tableNamePattern, /** * Delete existing snapshots whose names match the pattern passed. - * * @param regex The regular expression to match against * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #deleteSnapshots(Pattern)} instead. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #deleteSnapshots(Pattern)} instead. */ @Deprecated void deleteSnapshots(String regex) throws IOException; /** * Delete existing snapshots whose names match the pattern passed. - * * @param pattern pattern for names of the snapshot to match * @throws IOException if a remote or network exception occurs */ @@ -2698,8 +2535,8 @@ List listTableSnapshots(Pattern tableNamePattern, * @param tableNameRegex The table name regular expression to match against * @param snapshotNameRegex The snapshot name regular expression to match against * @throws IOException if a remote or network exception occurs - * @deprecated since 2.0 version and will be removed in 3.0 version. - * Use {@link #deleteTableSnapshots(Pattern, Pattern)} instead. + * @deprecated since 2.0 version and will be removed in 3.0 version. Use + * {@link #deleteTableSnapshots(Pattern, Pattern)} instead. */ @Deprecated void deleteTableSnapshots(String tableNameRegex, String snapshotNameRegex) throws IOException; @@ -2716,7 +2553,6 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) /** * Apply the new quota settings. 
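For illustration only (not part of this patch): a sketch of the snapshot listing and bulk-deletion methods reformatted above; admin is an assumed open Admin handle and the name patterns are placeholders.

  List<SnapshotDescription> matching = admin.listSnapshots(Pattern.compile("usertable-snap-.*"));
  admin.deleteSnapshots(Pattern.compile("usertable-snap-2021.*")); // bulk delete by snapshot name pattern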
- * * @param quota the quota settings * @throws IOException if a remote or network exception occurs */ @@ -2742,60 +2578,60 @@ void deleteTableSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) /** * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the active - * master.

    The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access - * a published coprocessor {@link com.google.protobuf.Service} using standard protobuf service - * invocations:

    - *
    +   * master.
    +   * 

    + * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published + * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: + *

    + *
    + * + *
        * CoprocessorRpcChannel channel = myAdmin.coprocessorService();
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
        * MyCallRequest request = MyCallRequest.newBuilder()
        *     ...
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
    -   * 
    - * + *
    + * + *
    * @return A MasterCoprocessorRpcChannel instance */ CoprocessorRpcChannel coprocessorService(); - /** - * Creates and returns a {@link com.google.protobuf.RpcChannel} instance - * connected to the passed region server. - * + * Creates and returns a {@link com.google.protobuf.RpcChannel} instance connected to the passed + * region server. *

    * The obtained {@link com.google.protobuf.RpcChannel} instance can be used to access a published * coprocessor {@link com.google.protobuf.Service} using standard protobuf service invocations: *

    - * - *
    - *
    +   * 
    + * + *
        * CoprocessorRpcChannel channel = myAdmin.coprocessorService(serverName);
        * MyService.BlockingInterface service = MyService.newBlockingStub(channel);
        * MyCallRequest request = MyCallRequest.newBuilder()
        *     ...
        *     .build();
        * MyCallResponse response = service.myCall(null, request);
    -   * 
    - * + *
    + * + *
    * @param serverName the server name to which the endpoint call is made * @return A RegionServerCoprocessorRpcChannel instance */ CoprocessorRpcChannel coprocessorService(ServerName serverName); - /** - * Update the configuration and trigger an online config change - * on the regionserver. + * Update the configuration and trigger an online config change on the regionserver. * @param server : The server whose config needs to be updated. * @throws IOException if a remote or network exception occurs */ void updateConfiguration(ServerName server) throws IOException; - /** - * Update the configuration and trigger an online config change - * on all the regionservers. + * Update the configuration and trigger an online config change on all the regionservers. * @throws IOException if a remote or network exception occurs */ void updateConfiguration() throws IOException; @@ -2868,11 +2704,10 @@ default boolean[] setSplitOrMergeEnabled(boolean enabled, boolean synchronous, /** * Query the current state of the switch. - * * @return true if the switch is enabled, false otherwise. * @throws IOException if a remote or network exception occurs - * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use - * {@link #isSplitEnabled()} or {@link #isMergeEnabled()} instead. + * @deprecated Since 2.0.0. Will be removed in 3.0.0. Use {@link #isSplitEnabled()} or + * {@link #isMergeEnabled()} instead. */ @Deprecated default boolean isSplitOrMergeEnabled(MasterSwitchType switchType) throws IOException { @@ -2962,8 +2797,7 @@ Future addReplicationPeerAsync(String peerId, ReplicationPeerConfig peerCo * @throws IOException if a remote or network exception occurs */ default void removeReplicationPeer(String peerId) throws IOException { - get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), - TimeUnit.MILLISECONDS); + get(removeReplicationPeerAsync(peerId), getSyncWaitTimeout(), TimeUnit.MILLISECONDS); } /** @@ -3068,7 +2902,7 @@ default void appendReplicationPeerTableCFs(String id, Map listReplicationPeers(Pattern pattern) throws IOException; /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. + * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. * @param offload True to offload the regions from the decommissioned servers * @throws IOException if a remote or network exception occurs @@ -3124,9 +2958,8 @@ default void removeReplicationPeerTableCFs(String id, Map listDecommissionedRegionServers() throws IOException; /** - * Remove decommission marker from a region server to allow regions assignments. - * Load regions onto the server if a list of regions is given. Region loading is - * asynchronous. + * Remove decommission marker from a region server to allow regions assignments. Load regions onto + * the server if a list of regions is given. Region loading is asynchronous. * @param server The server to recommission. * @param encodedRegionNames Regions to load onto the server. 
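For illustration only (not part of this patch): a sketch of the decommission/recommission flow described above; admin is an assumed open Admin handle and the server name is a placeholder.

  ServerName rs = ServerName.valueOf("rs1.example.com,16020,1600000000000");
  admin.decommissionRegionServers(Collections.singletonList(rs), true); // true = offload regions (asynchronous)
  List<ServerName> drained = admin.listDecommissionedRegionServers();   // should now include rs
  admin.recommissionRegionServer(rs, Collections.emptyList());          // no explicit regions to load back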
* @throws IOException if a remote or network exception occurs @@ -3163,7 +2996,7 @@ void recommissionRegionServer(ServerName server, List encodedRegionNames * @throws InterruptedException */ void clearCompactionQueues(ServerName serverName, Set queues) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** * List dead region servers. @@ -3208,8 +3041,8 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese boolean isRpcThrottleEnabled() throws IOException; /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. * @return Previous exceed throttle enabled value * @throws IOException if a remote or network exception occurs @@ -3226,8 +3059,8 @@ void cloneTableSchema(TableName tableName, TableName newTableName, boolean prese * Fetches the observed {@link SpaceQuotaSnapshotView}s observed by a RegionServer. * @throws IOException if a remote or network exception occurs */ - Map getRegionServerSpaceQuotaSnapshots( - ServerName serverName) throws IOException; + Map + getRegionServerSpaceQuotaSnapshots(ServerName serverName) throws IOException; /** * Returns the Master's view of a quota on the given {@code namespace} or null if the Master has @@ -3292,36 +3125,30 @@ default List hasUserPermissions(List permissions) throws IO /** * Turn on or off the auto snapshot cleanup based on TTL. - * * @param on Set to true to enable, false to disable. * @param synchronous If true, it waits until current snapshot cleanup is completed, - * if outstanding. + * if outstanding. * @return Previous auto snapshot cleanup value * @throws IOException if a remote or network exception occurs */ - boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) - throws IOException; + boolean snapshotCleanupSwitch(final boolean on, final boolean synchronous) throws IOException; /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, - * false otherwise. + * @return true if the auto snapshot cleanup is enabled, false + * otherwise. * @throws IOException if a remote or network exception occurs */ boolean isSnapshotCleanupEnabled() throws IOException; - /** - * Retrieves online slow/large RPC logs from the provided list of - * RegionServers - * + * Retrieves online slow/large RPC logs from the provided list of RegionServers * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided (determines slow / large RPC logs) * @return online slowlog response list * @throws IOException if a remote or network exception occurs - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. 
*/ @Deprecated default List getSlowLogResponses(final Set serverNames, @@ -3338,33 +3165,27 @@ default List getSlowLogResponses(final Set serverNa filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - List logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); + List logEntries = getLogEntries(serverNames, logType, ServerType.REGION_SERVER, + logQueryFilter.getLimit(), filterParams); return logEntries.stream().map(logEntry -> (OnlineLogRecord) logEntry) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } /** - * Clears online slow/large RPC logs from the provided list of - * RegionServers - * + * Clears online slow/large RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer * @throws IOException if a remote or network exception occurs */ - List clearSlowLogResponses(final Set serverNames) - throws IOException; - + List clearSlowLogResponses(final Set serverNames) throws IOException; /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. + * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response @@ -3372,6 +3193,6 @@ List clearSlowLogResponses(final Set serverNames) * @return Log entries representing online records from servers * @throws IOException if a remote or network exception occurs */ - List getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams) throws IOException; + List getLogEntries(Set serverNames, String logType, ServerType serverType, + int limit, Map filterParams) throws IOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java index 10933abf3cf2..7ead41af6984 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AdvancedScanResultConsumer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Optional; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java index 8d21994c23e0..3ef28308f1c8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AllowPartialScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java index 3a08d687fbbb..50fc36e80133 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java @@ -36,12 +36,12 @@ /** * Performs Append operations on a single row. *
<p>
    - * This operation ensures atomicty to readers. Appends are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicity to readers. Appends are done under a single row lock, so write + * operations to a row are synchronized, and readers are guaranteed to see this operation fully + * completed. *
<p>
    - * To append to a set of columns of a row, instantiate an Append object with the - * row to append to. At least one column to append must be specified using the + * To append to a set of columns of a row, instantiate an Append object with the row to append to. + * At least one column to append must be specified using the * {@link #addColumn(byte[], byte[], byte[])} method. */ @InterfaceAudience.Public @@ -53,12 +53,11 @@ public class Append extends Mutation { /** * Sets the TimeRange to be used on the Get for this append. *
<p>
    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this append, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this append, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *
<p>
    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive @@ -79,15 +78,13 @@ public TimeRange getTimeRange() { } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } /** - * @param returnResults - * True (default) if the append operation should return the results. - * A client that is not interested in the result can save network - * bandwidth setting this to false. + * @param returnResults True (default) if the append operation should return the results. A client + * that is not interested in the result can save network bandwidth setting this to false. */ @Override public Append setReturnResults(boolean returnResults) { @@ -113,6 +110,7 @@ public boolean isReturnResults() { public Append(byte[] row) { this(row, 0, row.length); } + /** * Copy constructor * @param appendToCopy append to copy @@ -122,27 +120,27 @@ public Append(Append appendToCopy) { this.tr = appendToCopy.getTimeRange(); } - /** Create a Append operation for the specified row. + /** + * Create a Append operation for the specified row. *
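Taken together, the Append javadocs in this hunk describe a small workflow: construct the Append with a row, add at least one column, optionally narrow the TimeRange, and optionally skip the returned value to save bandwidth. A minimal synchronous sketch under those assumptions follows; the table name "my_table", family "cf", qualifier and row key are placeholder values.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AppendExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(TableName.valueOf("my_table"))) {
      Append append = new Append(Bytes.toBytes("row-1"));
      // At least one column must be added before the append is submitted.
      append.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("|event"));
      // Optional: only consider cells from the last hour when locating the cell to append to,
      // as the setTimeRange javadoc describes for time-partitioned counters.
      long now = System.currentTimeMillis();
      append.setTimeRange(now - 3_600_000L, Long.MAX_VALUE);
      // Optional: skip shipping the new value back; the returned Result is then empty.
      append.setReturnResults(false);
      Result result = table.append(append);
      System.out.println("append result: " + result);
    }
  }
}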
<p>
    * At least one column must be appended to. * @param rowArray Makes a copy out of this buffer. * @param rowOffset * @param rowLength */ - public Append(final byte [] rowArray, final int rowOffset, final int rowLength) { + public Append(final byte[] rowArray, final int rowOffset, final int rowLength) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); } /** - * Construct the Append with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. + * Construct the Append with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Append(byte[] row, long ts, NavigableMap> familyMap) { + public Append(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -152,11 +150,11 @@ public Append(byte[] row, long ts, NavigableMap> familyMap) * @param qualifier column qualifier * @param value value to append to specified column * @return this - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #addColumn(byte[], byte[], byte[])} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #addColumn(byte[], byte[], byte[])} instead */ @Deprecated - public Append add(byte [] family, byte [] qualifier, byte [] value) { + public Append add(byte[] family, byte[] qualifier, byte[] value) { return this.addColumn(family, qualifier, value); } @@ -211,8 +209,8 @@ public Append setDurability(Durability d) { /** * Method for setting the Append's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Append#Append(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Append#Append(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java index 62c9e32f8dc5..c1db90728b8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdmin.java @@ -165,8 +165,8 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ /** * Creates a new table with an initial set of empty regions defined by the specified split keys. - * The total number of regions created will be the number of split keys plus one. - * Note : Avoid passing empty split key. + * The total number of regions created will be the number of split keys plus one. Note : Avoid + * passing empty split key. * @param desc table descriptor for table * @param splitKeys array of split keys for the initial regions of the table */ @@ -248,8 +248,7 @@ CompletableFuture createTable(TableDescriptor desc, byte[] startKey, byte[ * @param tableName name of the table to add column family to * @param columnFamily column family descriptor of column family to be added */ - CompletableFuture addColumnFamily(TableName tableName, - ColumnFamilyDescriptor columnFamily); + CompletableFuture addColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily); /** * Delete a column family from a table. 
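The createTable javadoc above spells out the split-key rule (N non-empty split keys produce N + 1 initial regions). A minimal asynchronous sketch of that call follows; the table name, column family and split keys are placeholder values, and the future is joined only to keep the example linear.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableWithSplitsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection connection = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = connection.getAdmin();
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(TableName.valueOf("my_table"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build();
      // Three non-empty split keys yield four initial regions.
      byte[][] splitKeys = { Bytes.toBytes("b"), Bytes.toBytes("m"), Bytes.toBytes("t") };
      CompletableFuture<Void> created = admin.createTable(desc, splitKeys);
      created.join();
      System.out.println("created " + (splitKeys.length + 1) + " regions");
    }
  }
}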
@@ -273,7 +272,7 @@ CompletableFuture modifyColumnFamily(TableName tableName, * @param dstSFT the destination store file tracker */ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, byte[] family, - String dstSFT); + String dstSFT); /** * Create a new namespace. @@ -329,8 +328,8 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, CompletableFuture flush(TableName tableName); /** - * Flush the specified column family stores on all regions of the passed table. - * This runs as a synchronous operation. + * Flush the specified column family stores on all regions of the passed table. This runs as a + * synchronous operation. * @param tableName table to flush * @param columnFamily column family within a table */ @@ -358,8 +357,8 @@ CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact */ default CompletableFuture compact(TableName tableName) { @@ -369,8 +368,7 @@ default CompletableFuture compact(TableName tableName) { /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to compact * @param columnFamily column family within a table. If not present, compact the table's all * column families. @@ -381,9 +379,9 @@ default CompletableFuture compact(TableName tableName, byte[] columnFamily /** * Compact a table. When the returned CompletableFuture is done, it only means the compact request - * was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. + * was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. * @param tableName table to compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ @@ -392,8 +390,7 @@ default CompletableFuture compact(TableName tableName, byte[] columnFamily /** * Compact a column family within a table. When the returned CompletableFuture is done, it only * means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction type. * @param tableName table to compact * @param columnFamily column family within a table @@ -421,8 +418,8 @@ CompletableFuture compact(TableName tableName, byte[] columnFamily, /** * Major compact a table. 
When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to major compact */ default CompletableFuture majorCompact(TableName tableName) { @@ -432,8 +429,7 @@ default CompletableFuture majorCompact(TableName tableName) { /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for * normal compaction. type. * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all @@ -445,9 +441,9 @@ default CompletableFuture majorCompact(TableName tableName, byte[] columnF /** * Major compact a table. When the returned CompletableFuture is done, it only means the compact - * request was sent to HBase and may need some time to finish the compact operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for - * normal compaction type. + * request was sent to HBase and may need some time to finish the compact operation. Throws + * {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found for normal compaction + * type. * @param tableName table to major compact * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} */ @@ -456,8 +452,7 @@ default CompletableFuture majorCompact(TableName tableName, byte[] columnF /** * Major compact a column family within a table. When the returned CompletableFuture is done, it * only means the compact request was sent to HBase and may need some time to finish the compact - * operation. - * Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. + * operation. Throws {@link org.apache.hadoop.hbase.TableNotFoundException} if table not found. * @param tableName table to major compact * @param columnFamily column family within a table. If not present, major compact the table's all * column families. @@ -622,8 +617,7 @@ default CompletableFuture mergeRegions(byte[] nameOfRegionA, byte[] nameOf * @param forcible If true, force unassign (Will remove region from regions-in-transition too if * present. If results in double assignment use hbck -fix to resolve. To be used by * experts). - * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} - * instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use {@link #unassign(byte[])} instead. 
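The compact and majorCompact javadocs above stress that the returned CompletableFuture only tracks delivery of the request, not the compaction itself. A minimal sketch of triggering a major compaction and then polling its state follows; the table name is a placeholder and the blocking get() calls are used only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MajorCompactExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection connection = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = connection.getAdmin();
      TableName table = TableName.valueOf("my_table");
      // Completion here only means the request reached the server(s).
      admin.majorCompact(table).join();
      // The compaction runs in the background; poll the state to watch progress.
      CompactionState state = admin.getCompactionState(table).get();
      System.out.println("compaction state right after the request: " + state);
    }
  }
}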
* @see HBASE-24875 */ @Deprecated @@ -685,8 +679,8 @@ default CompletableFuture addReplicationPeer(String peerId, * @param peerConfig configuration for the replication slave cluster * @param enabled peer state, true if ENABLED and false if DISABLED */ - CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled); + CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled); /** * Remove a peer and stop the replication @@ -1008,8 +1002,8 @@ CompletableFuture isProcedureFinished(String signature, String instance Map props); /** - * Abort a procedure - * Do not use. Usually it is ignored but if not, it can do more damage than good. See hbck2. + * Abort a procedure Do not use. Usually it is ignored but if not, it can do more damage than + * good. See hbck2. * @param procId ID of the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if aborted, false if procedure already completed or does not exist. the value is @@ -1033,10 +1027,10 @@ CompletableFuture isProcedureFinished(String signature, String instance CompletableFuture getLocks(); /** - * Mark region server(s) as decommissioned to prevent additional regions from getting - * assigned to them. Optionally unload the regions on the servers. If there are multiple servers - * to be decommissioned, decommissioning them at the same time can prevent wasteful region - * movements. Region unloading is asynchronous. + * Mark region server(s) as decommissioned to prevent additional regions from getting assigned to + * them. Optionally unload the regions on the servers. If there are multiple servers to be + * decommissioned, decommissioning them at the same time can prevent wasteful region movements. + * Region unloading is asynchronous. * @param servers The list of servers to decommission. 
* @param offload True to offload the regions from the decommissioned servers */ @@ -1079,7 +1073,7 @@ default CompletableFuture getMaster() { */ default CompletableFuture> getBackupMasters() { return getClusterMetrics(EnumSet.of(Option.BACKUP_MASTERS)) - .thenApply(ClusterMetrics::getBackupMasterNames); + .thenApply(ClusterMetrics::getBackupMasterNames); } /** @@ -1090,8 +1084,8 @@ default CompletableFuture> getRegionServers() { .thenApply(ClusterMetrics::getServersName); } - default CompletableFuture> getRegionServers( - boolean excludeDecommissionedRS) { + default CompletableFuture> + getRegionServers(boolean excludeDecommissionedRS) { CompletableFuture> future = new CompletableFuture<>(); addListener( getClusterMetrics(EnumSet.of(Option.SERVERS_NAME)).thenApply(ClusterMetrics::getServersName), @@ -1107,7 +1101,7 @@ default CompletableFuture> getRegionServers( future.completeExceptionally(decomErr); } else { future.complete(allServers.stream().filter(s -> !decomServers.contains(s)) - .collect(ImmutableList.toImmutableList())); + .collect(ImmutableList.toImmutableList())); } }); } @@ -1129,8 +1123,8 @@ default CompletableFuture> getMasterCoprocessorNames() { * @return master info port */ default CompletableFuture getMasterInfoPort() { - return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)).thenApply( - ClusterMetrics::getMasterInfoPort); + return getClusterMetrics(EnumSet.of(Option.MASTER_INFO_PORT)) + .thenApply(ClusterMetrics::getMasterInfoPort); } /** @@ -1193,7 +1187,7 @@ default CompletableFuture getMasterInfoPort() { * @return a list of {@link RegionMetrics} wrapped by {@link CompletableFuture} */ CompletableFuture> getRegionMetrics(ServerName serverName, - TableName tableName); + TableName tableName); /** * Check whether master is in maintenance mode @@ -1285,8 +1279,7 @@ default CompletableFuture balancerSwitch(boolean on) { * {@link CompletableFuture}. */ default CompletableFuture balance() { - return balance(BalanceRequest.defaultInstance()) - .thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.defaultInstance()).thenApply(BalanceResponse::isBalancerRan); } /** @@ -1296,21 +1289,17 @@ default CompletableFuture balance() { * @param forcible whether we should force balance even if there is region in transition. * @return True if balancer ran, false otherwise. The return value will be wrapped by a * {@link CompletableFuture}. - * @deprecated Since 2.5.0. Will be removed in 4.0.0. - * Use {@link #balance(BalanceRequest)} instead. + * @deprecated Since 2.5.0. Will be removed in 4.0.0. Use {@link #balance(BalanceRequest)} + * instead. */ default CompletableFuture balance(boolean forcible) { - return balance( - BalanceRequest.newBuilder() - .setIgnoreRegionsInTransition(forcible) - .build() - ).thenApply(BalanceResponse::isBalancerRan); + return balance(BalanceRequest.newBuilder().setIgnoreRegionsInTransition(forcible).build()) + .thenApply(BalanceResponse::isBalancerRan); } /** - * Invoke the balancer with the given balance request. The BalanceRequest defines how the - * balancer will run. See {@link BalanceRequest} for more details. - * + * Invoke the balancer with the given balance request. The BalanceRequest defines how the balancer + * will run. See {@link BalanceRequest} for more details. * @param request defines how the balancer should run * @return {@link BalanceResponse} with details about the results of the invocation. */ @@ -1318,8 +1307,8 @@ default CompletableFuture balance(boolean forcible) { /** * Query the current state of the balancer. 
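The balance(BalanceRequest) javadoc above, together with the deprecated balance(boolean) default shown in this hunk, points at the builder-based replacement. A minimal sketch follows; the ignore-regions-in-transition flag is set only for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class BalanceRequestExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection connection = ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = connection.getAdmin();
      // Equivalent of the deprecated balance(true): run even with regions in transition.
      BalanceRequest request =
        BalanceRequest.newBuilder().setIgnoreRegionsInTransition(true).build();
      BalanceResponse response = admin.balance(request).get();
      System.out.println("balancer ran: " + response.isBalancerRan());
    }
  }
}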
- * @return true if the balance switch is on, false otherwise. The return value will be wrapped by a - * {@link CompletableFuture}. + * @return true if the balance switch is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture}. */ CompletableFuture isBalancerEnabled(); @@ -1363,8 +1352,8 @@ default CompletableFuture normalize() { /** * Query the current state of the cleaner chore. - * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by - * a {@link CompletableFuture} + * @return true if cleaner chore is on, false otherwise. The return value will be wrapped by a + * {@link CompletableFuture} */ CompletableFuture isCleanerChoreEnabled(); @@ -1384,8 +1373,8 @@ default CompletableFuture normalize() { /** * Query on the catalog janitor state. - * @return true if the catalog janitor is on, false otherwise. The return value will be - * wrapped by a {@link CompletableFuture} + * @return true if the catalog janitor is on, false otherwise. The return value will be wrapped by + * a {@link CompletableFuture} */ CompletableFuture isCatalogJanitorEnabled(); @@ -1407,6 +1396,7 @@ default CompletableFuture normalize() { * channel -> xxxService.newStub(channel) * *
    + * * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -1429,6 +1419,7 @@ CompletableFuture coprocessorService(Function stubMaker * channel -> xxxService.newStub(channel) * * + * * @param stubMaker a delegation to the actual {@code newStub} call. * @param callable a delegation to the actual protobuf rpc call. See the comment of * {@link ServiceCaller} for more details. @@ -1439,7 +1430,7 @@ CompletableFuture coprocessorService(Function stubMaker * @see ServiceCaller */ CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, ServerName serverName); + ServiceCaller callable, ServerName serverName); /** * List all the dead region servers. @@ -1468,21 +1459,19 @@ default CompletableFuture> listDeadServers() { /** * Create a new table by cloning the existent table schema. - * * @param tableName name of the table to be cloned * @param newTableName name of the new table where the table will be created * @param preserveSplits True if the splits should be preserved */ - CompletableFuture cloneTableSchema(final TableName tableName, - final TableName newTableName, final boolean preserveSplits); + CompletableFuture cloneTableSchema(final TableName tableName, final TableName newTableName, + final boolean preserveSplits); /** * Turn the compaction on or off. Disabling compactions will also interrupt any currently ongoing - * compactions. This state is ephemeral. The setting will be lost on restart. Compaction - * can also be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled - * in hbase-site.xml. - * - * @param switchState Set to true to enable, false to disable. + * compactions. This state is ephemeral. The setting will be lost on restart. Compaction can also + * be enabled/disabled by modifying configuration hbase.regionserver.compaction.enabled in + * hbase-site.xml. + * @param switchState Set to true to enable, false to disable. * @param serverNamesList list of region servers. * @return Previous compaction states for region servers */ @@ -1503,8 +1492,8 @@ CompletableFuture> compactionSwitch(boolean switchState CompletableFuture isRpcThrottleEnabled(); /** - * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota - * can be exceeded if region server has availble quota. + * Switch the exceed throttle quota. If enabled, user/table/namespace throttle quota can be + * exceeded if region server has availble quota. * @param enable Set to true to enable, false to disable. * @return Previous exceed throttle enabled value */ @@ -1532,8 +1521,8 @@ CompletableFuture> compactionSwitch(boolean switchState * Returns the Master's view of a quota on the given {@code tableName} or null if the Master has * no quota information on that table. */ - CompletableFuture getCurrentSpaceQuotaSnapshot( - TableName tableName); + CompletableFuture + getCurrentSpaceQuotaSnapshot(TableName tableName); /** * Grants user specific permissions @@ -1583,35 +1572,31 @@ default CompletableFuture> hasUserPermissions(List per * Notice that, the method itself is always non-blocking, which means it will always return * immediately. The {@code sync} parameter only effects when will we complete the returned * {@link CompletableFuture}. - * * @param on Set to true to enable, false to disable. - * @param sync If true, it waits until current snapshot cleanup is completed, - * if outstanding. 
+ * @param sync If true, it waits until current snapshot cleanup is completed, if + * outstanding. * @return Previous auto snapshot cleanup value wrapped by a {@link CompletableFuture}. */ CompletableFuture snapshotCleanupSwitch(boolean on, boolean sync); /** * Query the current state of the auto snapshot cleanup based on TTL. - * - * @return true if the auto snapshot cleanup is enabled, false otherwise. - * The return value will be wrapped by a {@link CompletableFuture}. + * @return true if the auto snapshot cleanup is enabled, false otherwise. The return value will be + * wrapped by a {@link CompletableFuture}. */ CompletableFuture isSnapshotCleanupEnabled(); /** - * Retrieves online slow RPC logs from the provided list of - * RegionServers - * + * Retrieves online slow RPC logs from the provided list of RegionServers * @param serverNames Server names to get slowlog responses from * @param logQueryFilter filter to be used if provided * @return Online slowlog response list. The return value wrapped by a {@link CompletableFuture} - * @deprecated since 2.4.0 and will be removed in 4.0.0. - * Use {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. + * @deprecated since 2.4.0 and will be removed in 4.0.0. Use + * {@link #getLogEntries(Set, String, ServerType, int, Map)} instead. */ @Deprecated - default CompletableFuture> getSlowLogResponses( - final Set serverNames, final LogQueryFilter logQueryFilter) { + default CompletableFuture> + getSlowLogResponses(final Set serverNames, final LogQueryFilter logQueryFilter) { String logType; if (LogQueryFilter.Type.LARGE_LOG.equals(logQueryFilter.getType())) { logType = "LARGE_LOG"; @@ -1624,31 +1609,26 @@ default CompletableFuture> getSlowLogResponses( filterParams.put("tableName", logQueryFilter.getTableName()); filterParams.put("userName", logQueryFilter.getUserName()); filterParams.put("filterByOperator", logQueryFilter.getFilterByOperator().toString()); - CompletableFuture> logEntries = - getLogEntries(serverNames, logType, ServerType.REGION_SERVER, logQueryFilter.getLimit(), - filterParams); - return logEntries.thenApply( - logEntryList -> logEntryList.stream().map(logEntry -> (OnlineLogRecord) logEntry) - .collect(Collectors.toList())); + CompletableFuture> logEntries = getLogEntries(serverNames, logType, + ServerType.REGION_SERVER, logQueryFilter.getLimit(), filterParams); + return logEntries.thenApply(logEntryList -> logEntryList.stream() + .map(logEntry -> (OnlineLogRecord) logEntry).collect(Collectors.toList())); } /** - * Clears online slow RPC logs from the provided list of - * RegionServers - * + * Clears online slow RPC logs from the provided list of RegionServers * @param serverNames Set of Server names to clean slowlog responses from - * @return List of booleans representing if online slowlog response buffer is cleaned - * from each RegionServer. The return value wrapped by a {@link CompletableFuture} + * @return List of booleans representing if online slowlog response buffer is cleaned from each + * RegionServer. The return value wrapped by a {@link CompletableFuture} */ CompletableFuture> clearSlowLogResponses(final Set serverNames); /** - * Retrieve recent online records from HMaster / RegionServers. - * Examples include slow/large RPC logs, balancer decisions by master. - * - * @param serverNames servers to retrieve records from, useful in case of records maintained - * by RegionServer as we can select specific server. In case of servertype=MASTER, logs will - * only come from the currently active master. 
+ * Retrieve recent online records from HMaster / RegionServers. Examples include slow/large RPC + * logs, balancer decisions by master. + * @param serverNames servers to retrieve records from, useful in case of records maintained by + * RegionServer as we can select specific server. In case of servertype=MASTER, logs will + * only come from the currently active master. * @param logType string representing type of log records * @param serverType enum for server type: HMaster or RegionServer * @param limit put a limit to list of records that server should send in response @@ -1656,5 +1636,5 @@ default CompletableFuture> getSlowLogResponses( * @return Log entries representing online records from servers */ CompletableFuture> getLogEntries(Set serverNames, String logType, - ServerType serverType, int limit, Map filterParams); + ServerType serverType, int limit, Map filterParams); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java index 49bc350bb9a6..a00a7c64f906 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java index ffb3ae97ecff..0ad6629aa14a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java index 7a381db39c82..6bcb451e945d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public AsyncAdminRequestRetryingCaller(Timer retryTimer, AsyncConnectionImpl con long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java index 7af385da2d83..91732b1398e6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBatchRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -132,7 +132,7 @@ public RegionRequest(HRegionLocation loc) { private static final class ServerRequest { public final ConcurrentMap actionsByRegion = - new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR); public void addAction(HRegionLocation loc, Action action) { computeIfAbsent(actionsByRegion, loc.getRegion().getRegionName(), @@ -145,7 +145,7 @@ public void setRegionRequest(byte[] regionName, RegionRequest regionReq) { public int getPriority() { return actionsByRegion.values().stream().flatMap(rr -> rr.actions.stream()) - .mapToInt(Action::getPriority).max().orElse(HConstants.PRIORITY_UNSET); + .mapToInt(Action::getPriority).max().orElse(HConstants.PRIORITY_UNSET); } } @@ -218,10 +218,11 @@ private void logException(int tries, Supplier> regionsSupp Throwable error, ServerName serverName) { if (tries > startLogErrorsCnt) { String regions = - regionsSupplier.get().map(r -> "'" + r.loc.getRegion().getRegionNameAsString() + "'") - .collect(Collectors.joining(",", "[", "]")); - LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName + - " failed, tries=" + tries, error); + regionsSupplier.get().map(r -> "'" + r.loc.getRegion().getRegionNameAsString() + "'") + .collect(Collectors.joining(",", "[", "]")); + LOG.warn("Process batch for " + regions + " in " + tableName + " from " + serverName + + " failed, tries=" + tries, + error); } } @@ -235,7 +236,7 @@ private void addError(Action action, Throwable error, ServerName serverName) { errors = action2Errors.computeIfAbsent(action, k -> new ArrayList<>()); } errors.add(new ThrowableWithExtraContext(error, EnvironmentEdgeManager.currentTime(), - getExtraContextForError(serverName))); + getExtraContextForError(serverName))); } private void addError(Iterable actions, Throwable error, ServerName serverName) { @@ -248,7 +249,7 @@ private void failOne(Action action, int tries, Throwable error, long currentTime return; } ThrowableWithExtraContext errorWithCtx = - new ThrowableWithExtraContext(error, currentTime, extras); + new ThrowableWithExtraContext(error, currentTime, extras); List errors = removeErrors(action); if (errors == null) { errors = Collections.singletonList(errorWithCtx); @@ -271,7 +272,7 @@ private void failAll(Stream actions, int tries) { return; } 
future.completeExceptionally(new RetriesExhaustedException(tries, - Optional.ofNullable(removeErrors(action)).orElse(Collections.emptyList()))); + Optional.ofNullable(removeErrors(action)).orElse(Collections.emptyList()))); }); } @@ -288,10 +289,10 @@ private ClientProtos.MultiRequest buildReq(Map actionsByR // action list. RequestConverter.buildNoDataRegionActions(entry.getKey(), entry.getValue().actions.stream() - .sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex())) - .collect(Collectors.toList()), - cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, - nonceGroup, indexMap); + .sorted((a1, a2) -> Integer.compare(a1.getOriginalIndex(), a2.getOriginalIndex())) + .collect(Collectors.toList()), + cells, multiRequestBuilder, regionActionBuilder, actionBuilder, mutationBuilder, nonceGroup, + indexMap); } return multiRequestBuilder.build(); } @@ -302,9 +303,9 @@ private void onComplete(Action action, RegionRequest regionReq, int tries, Serve MutableBoolean retryImmediately) { Object result = regionResult.result.getOrDefault(action.getOriginalIndex(), regionException); if (result == null) { - LOG.error("Server " + serverName + " sent us neither result nor exception for row '" + - Bytes.toStringBinary(action.getAction().getRow()) + "' of " + - regionReq.loc.getRegion().getRegionNameAsString()); + LOG.error("Server " + serverName + " sent us neither result nor exception for row '" + + Bytes.toStringBinary(action.getAction().getRow()) + "' of " + + regionReq.loc.getRegion().getRegionNameAsString()); addError(action, new RuntimeException("Invalid response"), serverName); failedActions.add(action); } else if (result instanceof Throwable) { @@ -406,8 +407,8 @@ private void sendToServer(ServerName serverName, ServerRequest serverReq, int tr onError(serverReq.actionsByRegion, tries, controller.getFailed(), serverName); } else { try { - onComplete(serverReq.actionsByRegion, tries, serverName, ResponseConverter.getResults(req, - indexMap, resp, controller.cellScanner())); + onComplete(serverReq.actionsByRegion, tries, serverName, + ResponseConverter.getResults(req, indexMap, resp, controller.cellScanner())); } catch (Exception e) { onError(serverReq.actionsByRegion, tries, e, serverName); return; @@ -436,7 +437,7 @@ private void sendOrDelay(Map actionsByServer, int tri serverReq.actionsByRegion.forEach((regionName, regionReq) -> { long backoff = backoffPolicy.getBackoffTime(serverName, regionName, serverStats); groupByBackoff.computeIfAbsent(backoff, k -> new ServerRequest()) - .setRegionRequest(regionName, regionReq); + .setRegionRequest(regionName, regionReq); }); groupByBackoff.forEach((backoff, sr) -> { if (backoff > 0) { @@ -463,7 +464,7 @@ private void onError(Map actionsByRegion, int tries, Thro return; } List copiedActions = actionsByRegion.values().stream().flatMap(r -> r.actions.stream()) - .collect(Collectors.toList()); + .collect(Collectors.toList()); addError(copiedActions, error, serverName); tryResubmit(copiedActions.stream(), tries, error instanceof RetryImmediatelyException, error instanceof CallQueueTooBigException); @@ -504,22 +505,23 @@ private void groupAndSend(Stream actions, int tries) { ConcurrentMap actionsByServer = new ConcurrentHashMap<>(); ConcurrentLinkedQueue locateFailed = new ConcurrentLinkedQueue<>(); addListener(CompletableFuture.allOf(actions - .map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(), - RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> { - 
if (error != null) { - error = unwrapCompletionException(translateException(error)); - if (error instanceof DoNotRetryIOException) { - failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), ""); - return; + .map(action -> conn.getLocator().getRegionLocation(tableName, action.getAction().getRow(), + RegionLocateType.CURRENT, locateTimeoutNs).whenComplete((loc, error) -> { + if (error != null) { + error = unwrapCompletionException(translateException(error)); + if (error instanceof DoNotRetryIOException) { + failOne(action, tries, error, EnvironmentEdgeManager.currentTime(), ""); + return; + } + addError(action, error, null); + locateFailed.add(action); + } else { + computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new) + .addAction(loc, action); } - addError(action, error, null); - locateFailed.add(action); - } else { - computeIfAbsent(actionsByServer, loc.getServerName(), ServerRequest::new).addAction(loc, - action); - } - })) - .toArray(CompletableFuture[]::new)), (v, r) -> { + })) + .toArray(CompletableFuture[]::new)), + (v, r) -> { if (!actionsByServer.isEmpty()) { sendOrDelay(actionsByServer, tries); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java index 7b21eb5fa13a..e5f28d2e0602 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java index ea2528d5152c..ed21fb8e23ef 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java index cd0496377bc4..3b5f5ea6ccae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorBuilderImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -103,6 +103,6 @@ public AsyncBufferedMutatorBuilder setMaxKeyValueSize(int maxKeyValueSize) { @Override public AsyncBufferedMutator build() { return new AsyncBufferedMutatorImpl(periodicalFlushTimer, tableBuilder.build(), writeBufferSize, - periodicFlushTimeoutNs, maxKeyValueSize); + periodicFlushTimeoutNs, maxKeyValueSize); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java index a7e5f3ff6d1f..30f9cb13334c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncBufferedMutatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,9 +30,10 @@ import java.util.stream.Stream; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableName; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; import org.apache.hbase.thirdparty.io.netty.util.Timeout; -import org.apache.yetus.audience.InterfaceAudience; /** * The implementation of {@link AsyncBufferedMutator}. Simply wrap an {@link AsyncTable}. @@ -110,13 +111,13 @@ protected void internalFlush() { @Override public List> mutate(List mutations) { List> futures = - Stream.> generate(CompletableFuture::new).limit(mutations.size()) - .collect(Collectors.toList()); + Stream.> generate(CompletableFuture::new).limit(mutations.size()) + .collect(Collectors.toList()); long heapSize = 0; for (Mutation mutation : mutations) { heapSize += mutation.heapSize(); if (mutation instanceof Put) { - validatePut((Put)mutation, maxKeyValueSize); + validatePut((Put) mutation, maxKeyValueSize); } } synchronized (this) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java index 48f004c0a29c..77ae4d17cd72 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -165,13 +165,13 @@ private CompletableFuture callOpenScanner(HBaseRpcControlle private void startScan(OpenScannerResponse resp) { addListener( conn.callerFactory.scanSingleRegion().id(resp.resp.getScannerId()).location(resp.loc) - .remote(resp.isRegionServerRemote) - .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) - .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) - .startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp), + .remote(resp.isRegionServerRemote) + .scannerLeaseTimeoutPeriod(resp.resp.getTtl(), TimeUnit.MILLISECONDS).stub(resp.stub) + .setScan(scan).metrics(scanMetrics).consumer(consumer).resultCache(resultCache) + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .scanTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) + .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) + .startLogErrorsCnt(startLogErrorsCnt).start(resp.controller, resp.resp), (hasMore, error) -> { if (error != null) { consumer.onError(error); @@ -187,17 +187,16 @@ private void startScan(OpenScannerResponse resp) { private CompletableFuture openScanner(int replicaId) { return conn.callerFactory. single().table(tableName) - .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan)) - .priority(scan.getPriority()) - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) - .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) - .startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner).call(); + .row(scan.getStartRow()).replicaId(replicaId).locateType(getLocateType(scan)) + .priority(scan.getPriority()).rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(scanTimeoutNs, TimeUnit.NANOSECONDS).pause(pauseNs, TimeUnit.NANOSECONDS) + .pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS).maxAttempts(maxAttempts) + .startLogErrorsCnt(startLogErrorsCnt).action(this::callOpenScanner).call(); } private long getPrimaryTimeoutNs() { return TableName.isMetaTableName(tableName) ? conn.connConf.getPrimaryMetaScanTimeoutNs() - : conn.connConf.getPrimaryScanTimeoutNs(); + : conn.connConf.getPrimaryScanTimeoutNs(); } private void openScanner() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java index d04b5f2cebe1..d461383875f2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java index cddfcb926ed5..506b11584b03 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -125,9 +125,9 @@ class AsyncConnectionConfiguration { long rpcTimeoutMs = conf.getLong(HBASE_RPC_TIMEOUT_KEY, DEFAULT_HBASE_RPC_TIMEOUT); this.rpcTimeoutNs = TimeUnit.MILLISECONDS.toNanos(rpcTimeoutMs); this.readRpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeoutMs)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_READ_TIMEOUT_KEY, rpcTimeoutMs)); this.writeRpcTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, rpcTimeoutMs)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(HBASE_RPC_WRITE_TIMEOUT_KEY, rpcTimeoutMs)); long pauseMs = conf.getLong(HBASE_CLIENT_PAUSE, DEFAULT_HBASE_CLIENT_PAUSE); long pauseForCQTBEMs = conf.getLong(HBASE_CLIENT_PAUSE_FOR_CQTBE, pauseMs); if (pauseForCQTBEMs < pauseMs) { @@ -140,27 +140,26 @@ class AsyncConnectionConfiguration { this.pauseForCQTBENs = TimeUnit.MILLISECONDS.toNanos(pauseForCQTBEMs); this.maxRetries = conf.getInt(HBASE_CLIENT_RETRIES_NUMBER, DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.startLogErrorsCnt = - conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); - this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos( - conf.getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, - DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)); + conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); + this.scanTimeoutNs = TimeUnit.MILLISECONDS.toNanos(conf + .getInt(HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)); this.scannerCaching = - conf.getInt(HBASE_CLIENT_SCANNER_CACHING, DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + conf.getInt(HBASE_CLIENT_SCANNER_CACHING, DEFAULT_HBASE_CLIENT_SCANNER_CACHING); this.metaScannerCaching = - conf.getInt(HBASE_META_SCANNER_CACHING, DEFAULT_HBASE_META_SCANNER_CACHING); + conf.getInt(HBASE_META_SCANNER_CACHING, DEFAULT_HBASE_META_SCANNER_CACHING); this.scannerMaxResultSize = conf.getLong(HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); this.writeBufferPeriodicFlushTimeoutNs = - TimeUnit.MILLISECONDS.toNanos(conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT)); + TimeUnit.MILLISECONDS.toNanos(conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, + WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT)); this.primaryCallTimeoutNs = TimeUnit.MICROSECONDS.toNanos( conf.getLong(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT)); this.primaryScanTimeoutNs = TimeUnit.MICROSECONDS.toNanos( conf.getLong(PRIMARY_SCAN_TIMEOUT_MICROSECOND, PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT)); this.primaryMetaScanTimeoutNs = - 
TimeUnit.MICROSECONDS.toNanos(conf.getLong(HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, - HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT)); + TimeUnit.MICROSECONDS.toNanos(conf.getLong(HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, + HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT)); this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java index 1eebcab4c93c..0810308493eb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncConnectionImpl.java @@ -27,6 +27,7 @@ import static org.apache.hadoop.hbase.client.NonceGenerator.CLIENT_NONCES_ENABLED_KEY; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.SERVER_NAME_KEY; import static org.apache.hadoop.hbase.util.FutureUtils.addListener; + import io.opentelemetry.api.trace.Span; import java.io.IOException; import java.util.Optional; @@ -56,8 +57,10 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; @@ -72,9 +75,9 @@ public class AsyncConnectionImpl implements AsyncConnection { private static final Logger LOG = LoggerFactory.getLogger(AsyncConnectionImpl.class); static final HashedWheelTimer RETRY_TIMER = new HashedWheelTimer( - new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), - 10, TimeUnit.MILLISECONDS); + new ThreadFactoryBuilder().setNameFormat("Async-Client-Retry-Timer-pool-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(), + 10, TimeUnit.MILLISECONDS); private final Configuration conf; @@ -102,7 +105,7 @@ public class AsyncConnectionImpl implements AsyncConnection { private final AtomicReference masterStub = new AtomicReference<>(); private final AtomicReference> masterStubMakeFuture = - new AtomicReference<>(); + new AtomicReference<>(); private final Optional stats; private final ClientBackoffPolicy backoffPolicy; @@ -132,8 +135,8 @@ public AsyncConnectionImpl(Configuration conf, ConnectionRegistry registry, Stri } this.rpcClient = RpcClientFactory.createClient(conf, clusterId, metrics.orElse(null)); this.rpcControllerFactory = RpcControllerFactory.instantiate(conf); - this.rpcTimeout = - (int) Math.min(Integer.MAX_VALUE, TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs())); + this.rpcTimeout = (int) Math.min(Integer.MAX_VALUE, + TimeUnit.NANOSECONDS.toMillis(connConf.getRpcTimeoutNs())); this.locator = new AsyncRegionLocator(this, RETRY_TIMER); this.callerFactory = new AsyncRpcRetryingCallerFactory(this, RETRY_TIMER); if (conf.getBoolean(CLIENT_NONCES_ENABLED_KEY, true)) { @@ -289,7 +292,7 @@ CompletableFuture getMasterStub() { future.completeExceptionally(error); } else if (addr == null) { future.completeExceptionally(new MasterNotRunningException( - "ZooKeeper available but no active master 
location found")); + "ZooKeeper available but no active master location found")); } else { LOG.debug("The fetched master address is {}", addr); try { @@ -329,13 +332,13 @@ public AsyncTable build() { @Override public AsyncTableBuilder getTableBuilder(TableName tableName, - ExecutorService pool) { + ExecutorService pool) { return new AsyncTableBuilderBase(tableName, connConf) { @Override public AsyncTable build() { RawAsyncTableImpl rawTable = - new RawAsyncTableImpl(AsyncConnectionImpl.this, RETRY_TIMER, this); + new RawAsyncTableImpl(AsyncConnectionImpl.this, RETRY_TIMER, this); return new AsyncTableImpl(rawTable, pool); } }; @@ -357,7 +360,7 @@ public AsyncAdminBuilder getAdminBuilder(ExecutorService pool) { @Override public AsyncAdmin build() { RawAsyncHBaseAdmin rawAdmin = - new RawAsyncHBaseAdmin(AsyncConnectionImpl.this, RETRY_TIMER, this); + new RawAsyncHBaseAdmin(AsyncConnectionImpl.this, RETRY_TIMER, this); return new AsyncHBaseAdmin(rawAdmin, pool); } }; @@ -370,9 +373,9 @@ public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName @Override public AsyncBufferedMutatorBuilder getBufferedMutatorBuilder(TableName tableName, - ExecutorService pool) { + ExecutorService pool) { return new AsyncBufferedMutatorBuilderImpl(connConf, getTableBuilder(tableName, pool), - RETRY_TIMER); + RETRY_TIMER); } private Hbck getHbckInternal(ServerName masterServer) { @@ -380,8 +383,10 @@ private Hbck getHbckInternal(ServerName masterServer) { // we will not create a new connection when creating a new protobuf stub, and for hbck there // will be no performance consideration, so for simplification we will create a new stub every // time instead of caching the stub here. - return new HBaseHbck(MasterProtos.HbckService.newBlockingStub( - rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), rpcControllerFactory); + return new HBaseHbck( + MasterProtos.HbckService + .newBlockingStub(rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout)), + rpcControllerFactory); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java index e2f8094bd75d..08630e509243 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncHBaseAdmin.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.client; import com.google.protobuf.RpcChannel; - import java.util.EnumSet; import java.util.List; import java.util.Map; @@ -206,7 +205,7 @@ public CompletableFuture modifyColumnFamily(TableName tableName, @Override public CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, - byte[] family, String dstSFT) { + byte[] family, String dstSFT) { return wrap(rawAdmin.modifyColumnFamilyStoreFileTracker(tableName, family, dstSFT)); } @@ -276,14 +275,13 @@ public CompletableFuture flushRegionServer(ServerName sn) { } @Override - public CompletableFuture compact(TableName tableName, - CompactType compactType) { + public CompletableFuture compact(TableName tableName, CompactType compactType) { return wrap(rawAdmin.compact(tableName, compactType)); } @Override - public CompletableFuture compact(TableName tableName, - byte[] columnFamily, CompactType compactType) { + public CompletableFuture compact(TableName tableName, byte[] columnFamily, + CompactType compactType) { return wrap(rawAdmin.compact(tableName, columnFamily, compactType)); } @@ -409,8 
+407,8 @@ public CompletableFuture> getQuota(QuotaFilter filter) { } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return wrap(rawAdmin.addReplicationPeer(peerId, peerConfig, enabled)); } @@ -494,7 +492,7 @@ public CompletableFuture restoreSnapshot(String snapshotName) { @Override public CompletableFuture restoreSnapshot(String snapshotName, boolean takeFailSafeSnapshot, - boolean restoreAcl) { + boolean restoreAcl) { return wrap(rawAdmin.restoreSnapshot(snapshotName, takeFailSafeSnapshot, restoreAcl)); } @@ -668,8 +666,8 @@ public CompletableFuture isMasterInMaintenanceMode() { } @Override - public CompletableFuture getCompactionState( - TableName tableName, CompactType compactType) { + public CompletableFuture getCompactionState(TableName tableName, + CompactType compactType) { return wrap(rawAdmin.getCompactionState(tableName, compactType)); } @@ -684,8 +682,8 @@ public CompletableFuture> getLastMajorCompactionTimestamp(TableNa } @Override - public CompletableFuture> getLastMajorCompactionTimestampForRegion( - byte[] regionName) { + public CompletableFuture> + getLastMajorCompactionTimestampForRegion(byte[] regionName) { return wrap(rawAdmin.getLastMajorCompactionTimestampForRegion(regionName)); } @@ -809,8 +807,8 @@ public CompletableFuture> getSpaceQuotaTableSizes() { } @Override - public CompletableFuture> getRegionServerSpaceQuotaSnapshots( - ServerName serverName) { + public CompletableFuture> + getRegionServerSpaceQuotaSnapshots(ServerName serverName) { return wrap(rawAdmin.getRegionServerSpaceQuotaSnapshots(serverName)); } @@ -848,8 +846,7 @@ public CompletableFuture> hasUserPermissions(String userName, } @Override - public CompletableFuture snapshotCleanupSwitch(final boolean on, - final boolean sync) { + public CompletableFuture snapshotCleanupSwitch(final boolean on, final boolean sync) { return wrap(rawAdmin.snapshotCleanupSwitch(on, sync)); } @@ -865,8 +862,7 @@ public CompletableFuture> clearSlowLogResponses(Set se @Override public CompletableFuture> getLogEntries(Set serverNames, - String logType, ServerType serverType, int limit, - Map filterParams) { + String logType, ServerType serverType, int limit, Map filterParams) { return wrap(rawAdmin.getLogEntries(serverNames, logType, serverType, limit, filterParams)); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java index de2778cf6d78..33f2604a60ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -47,14 +47,14 @@ public AsyncMasterRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl Callable callable, int priority, long pauseNs, long pauseForCQTBENs, int maxRetries, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseForCQTBENs, maxRetries, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + rpcTimeoutNs, startLogErrorsCnt); this.callable = callable; } private void clearMasterStubCacheOnError(MasterService.Interface stub, Throwable error) { // ServerNotRunningYetException may because it is the backup master. - if (ClientExceptionsUtil.isConnectionException(error) || - error instanceof ServerNotRunningYetException) { + if (ClientExceptionsUtil.isConnectionException(error) + || error instanceof ServerNotRunningYetException) { conn.clearMasterStubCache(stub); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java index 9df8efb8a63d..77ee0de9b1d7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ class AsyncMetaRegionLocator { private final AtomicReference metaRegionLocations = new AtomicReference<>(); private final AtomicReference> metaRelocateFuture = - new AtomicReference<>(); + new AtomicReference<>(); AsyncMetaRegionLocator(ConnectionRegistry registry) { this.registry = registry; @@ -77,8 +77,8 @@ private void addLocationToCache(HRegionLocation loc) { } } HRegionLocation oldLoc = oldLocs.getRegionLocation(replicaId); - if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() || - oldLoc.getServerName().equals(loc.getServerName()))) { + if (oldLoc != null && (oldLoc.getSeqNum() > loc.getSeqNum() + || oldLoc.getServerName().equals(loc.getServerName()))) { return; } RegionLocations newLocs = replaceRegionLocation(oldLocs, loc); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java index 5798ee5f1fbd..1e37ded8eb93 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncNonMetaRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,11 +59,12 @@ import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.client.Scan.ReadType; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hbase.thirdparty.com.google.common.base.Objects; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.base.Objects; + /** * The asynchronous locator for regions other than meta. 
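An aside for readers of this formatting-only hunk: the condition re-wrapped in AsyncMetaRegionLocator.addLocationToCache above encodes a small cache policy, namely keep the cached meta location unless the incoming one is strictly newer and names a different server. A minimal standalone sketch of that rule, assuming only the HRegionLocation accessors already used in the hunk (the wrapper class and method name below are illustrative, not part of the patch):

import org.apache.hadoop.hbase.HRegionLocation;

final class MetaCachePolicySketch {
  // Mirrors the guard in addLocationToCache: skip the update when the cached
  // entry is newer, or when it already points at the same server.
  static boolean shouldReplace(HRegionLocation oldLoc, HRegionLocation newLoc) {
    if (oldLoc == null) {
      return true; // nothing cached yet, always accept the new location
    }
    boolean cachedIsNewer = oldLoc.getSeqNum() > newLoc.getSeqNum();
    boolean sameServer = oldLoc.getServerName().equals(newLoc.getServerName());
    return !(cachedIsNewer || sameServer);
  }
}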
*/ @@ -73,7 +74,7 @@ class AsyncNonMetaRegionLocator { private static final Logger LOG = LoggerFactory.getLogger(AsyncNonMetaRegionLocator.class); static final String MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = - "hbase.client.meta.max.concurrent.locate.per.table"; + "hbase.client.meta.max.concurrent.locate.per.table"; private static final int DEFAULT_MAX_CONCURRENT_LOCATE_REQUEST_PER_TABLE = 8; @@ -123,12 +124,12 @@ public boolean equals(Object obj) { private static final class TableCache { private final ConcurrentNavigableMap cache = - new ConcurrentSkipListMap<>(BYTES_COMPARATOR); + new ConcurrentSkipListMap<>(BYTES_COMPARATOR); private final Set pendingRequests = new HashSet<>(); private final Map> allRequests = - new LinkedHashMap<>(); + new LinkedHashMap<>(); public boolean hasQuota(int max) { return pendingRequests.size() < max; @@ -148,7 +149,7 @@ public Optional getCandidate() { public void clearCompletedRequests(RegionLocations locations) { for (Iterator>> iter = - allRequests.entrySet().iterator(); iter.hasNext();) { + allRequests.entrySet().iterator(); iter.hasNext();) { Map.Entry> entry = iter.next(); if (tryComplete(entry.getKey(), entry.getValue(), locations)) { iter.remove(); @@ -177,8 +178,8 @@ private boolean tryComplete(LocateRequest req, CompletableFuture 0 || Bytes.equals(EMPTY_END_ROW, endKey)) && - Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0); + completed = c == 0 || ((c > 0 || Bytes.equals(EMPTY_END_ROW, endKey)) + && Bytes.compareTo(loc.getRegion().getStartKey(), req.row) < 0); } else { completed = loc.getRegion().containsRow(req.row); } @@ -196,36 +197,36 @@ private boolean tryComplete(LocateRequest req, CompletableFuture { - int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; - try { - RegionLocations metaLocations = conn.registry.getMetaRegionLocations().get( - conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); + this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, conn.getChoreService(), () -> { + int numOfReplicas = CatalogReplicaLoadBalanceSelector.UNINITIALIZED_NUM_OF_REPLICAS; + try { + RegionLocations metaLocations = conn.registry.getMetaRegionLocations() + .get(conn.connConf.getReadRpcTimeoutNs(), TimeUnit.NANOSECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); break; case NONE: // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config. - boolean useMetaReplicas = conn.getConfiguration().getBoolean(USE_META_REPLICAS, - DEFAULT_USE_META_REPLICAS); + boolean useMetaReplicas = + conn.getConfiguration().getBoolean(USE_META_REPLICAS, DEFAULT_USE_META_REPLICAS); if (useMetaReplicas) { this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ; } @@ -286,9 +287,10 @@ private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) RegionLocations mergedLocs = oldLocs.mergeLocations(locs); if (isEqual(mergedLocs, oldLocs)) { // the merged one is the same with the old one, give up - LOG.trace("Will not add {} to cache because the old value {} " + - " is newer than us or has the same server name." 
+ - " Maybe it is updated before we replace it", locs, oldLocs); + LOG.trace("Will not add {} to cache because the old value {} " + + " is newer than us or has the same server name." + + " Maybe it is updated before we replace it", + locs, oldLocs); return oldLocs; } if (tableCache.cache.replace(startKey, oldLocs, mergedLocs)) { @@ -298,8 +300,10 @@ private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) // the region is different, here we trust the one we fetched. This maybe wrong but finally // the upper layer can detect this and trigger removal of the wrong locations if (LOG.isDebugEnabled()) { - LOG.debug("The newnly fetch region {} is different from the old one {} for row '{}'," + - " try replaing the old one...", region, oldRegion, Bytes.toStringBinary(startKey)); + LOG.debug( + "The newnly fetch region {} is different from the old one {} for row '{}'," + + " try replaing the old one...", + region, oldRegion, Bytes.toStringBinary(startKey)); } if (tableCache.cache.replace(startKey, oldLocs, locs)) { return locs; @@ -311,8 +315,9 @@ private RegionLocations addToCache(TableCache tableCache, RegionLocations locs) private void complete(TableName tableName, LocateRequest req, RegionLocations locs, Throwable error) { if (error != null) { - LOG.warn("Failed to locate region in '" + tableName + "', row='" + - Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, error); + LOG.warn("Failed to locate region in '" + tableName + "', row='" + + Bytes.toStringBinary(req.row) + "', locateType=" + req.locateType, + error); } Optional toSend = Optional.empty(); TableCache tableCache = getTableCache(tableName); @@ -422,7 +427,7 @@ private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName byte[] row, int replicaId) { boolean isEmptyStopRow = isEmptyStopRow(row); Map.Entry entry = - isEmptyStopRow ? tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row); + isEmptyStopRow ? 
tableCache.cache.lastEntry() : tableCache.cache.lowerEntry(row); if (entry == null) { recordCacheMiss(); return null; @@ -433,8 +438,8 @@ private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName recordCacheMiss(); return null; } - if (isEmptyStopRow(loc.getRegion().getEndKey()) || - (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) { + if (isEmptyStopRow(loc.getRegion().getEndKey()) + || (!isEmptyStopRow && Bytes.compareTo(loc.getRegion().getEndKey(), row) >= 0)) { if (LOG.isTraceEnabled()) { LOG.trace("Found {} in cache for {}, row='{}', locateType={}, replicaId={}", loc, tableName, Bytes.toStringBinary(row), RegionLocateType.BEFORE, replicaId); @@ -449,8 +454,8 @@ private RegionLocations locateRowBeforeInCache(TableCache tableCache, TableName private void locateInMeta(TableName tableName, LocateRequest req) { if (LOG.isTraceEnabled()) { - LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + - "', locateType=" + req.locateType + " in meta"); + LOG.trace("Try locate '" + tableName + "', row='" + Bytes.toStringBinary(req.row) + + "', locateType=" + req.locateType + " in meta"); } byte[] metaStartKey; if (req.locateType.equals(RegionLocateType.BEFORE)) { @@ -464,10 +469,10 @@ private void locateInMeta(TableName tableName, LocateRequest req) { metaStartKey = createRegionName(tableName, req.row, NINES, false); } byte[] metaStopKey = - RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); + RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); Scan scan = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey, true) - .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(locatePrefetchLimit) - .setReadType(ReadType.PREAD); + .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(locatePrefetchLimit) + .setReadType(ReadType.PREAD); switch (this.metaReplicaMode) { case LOAD_BALANCE: @@ -503,7 +508,7 @@ public void onComplete() { complete(tableName, req, null, new TableNotFoundException(tableName)); } else if (!completeNormally) { complete(tableName, req, null, new IOException( - "Unable to find region for '" + Bytes.toStringBinary(req.row) + "' in " + tableName)); + "Unable to find region for '" + Bytes.toStringBinary(req.row) + "' in " + tableName)); } } @@ -551,8 +556,8 @@ public void onNext(Result[] results, ScanController controller) { private RegionLocations locateInCache(TableCache tableCache, TableName tableName, byte[] row, int replicaId, RegionLocateType locateType) { return locateType.equals(RegionLocateType.BEFORE) - ? locateRowBeforeInCache(tableCache, tableName, row, replicaId) - : locateRowInCache(tableCache, tableName, row, replicaId); + ? 
locateRowBeforeInCache(tableCache, tableName, row, replicaId) + : locateRowInCache(tableCache, tableName, row, replicaId); } // locateToPrevious is true means we will use the start key of a region to locate the region @@ -680,7 +685,7 @@ void clearCache(TableName tableName) { } } conn.getConnectionMetrics() - .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(tableCache.cache.size())); + .ifPresent(metrics -> metrics.incrMetaCacheNumClearRegion(tableCache.cache.size())); } void clearCache() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java index 6071cb63645e..69381a659dc9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -30,7 +28,6 @@ import java.util.Objects; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; @@ -47,33 +44,32 @@ import org.slf4j.LoggerFactory; /** - * This class allows a continuous flow of requests. It's written to be compatible with a - * synchronous caller such as HTable. + * This class allows a continuous flow of requests. It's written to be compatible with a synchronous + * caller such as HTable. *

- * The caller sends a buffer of operation, by calling submit. This class extract from this list - * the operations it can send, i.e. the operations that are on region that are not considered - * as busy. The process is asynchronous, i.e. it returns immediately when if has finished to - * iterate on the list. If, and only if, the maximum number of current task is reached, the call - * to submit will block. Alternatively, the caller can call submitAll, in which case all the - * operations will be sent. Each call to submit returns a future-like object that can be used - * to track operation progress. + * The caller sends a buffer of operation, by calling submit. This class extract from this list the + * operations it can send, i.e. the operations that are on region that are not considered as busy. + * The process is asynchronous, i.e. it returns immediately when if has finished to iterate on the + * list. If, and only if, the maximum number of current task is reached, the call to submit will + * block. Alternatively, the caller can call submitAll, in which case all the operations will be + * sent. Each call to submit returns a future-like object that can be used to track operation + * progress. * * * The class manages internally the retries. * * - * The errors are tracked inside the Future object that is returned. - * The results are always tracked inside the Future object and can be retrieved when the call - * has finished. Partial results can also be retrieved if some part of multi-request failed. + * The errors are tracked inside the Future object that is returned. The results are always tracked + * inside the Future object and can be retrieved when the call has finished. Partial results can + * also be retrieved if some part of multi-request failed. * * - * This class is thread safe. - * Internally, the class is thread safe enough to manage simultaneously new submission and results - * arising from older operations. + * This class is thread safe. Internally, the class is thread safe enough to manage simultaneously + * new submission and results arising from older operations. * * - * Internally, this class works with {@link Row}, this mean it could be theoretically used for - * gets as well. + * Internally, this class works with {@link Row}, this mean it could be theoretically used for gets + * as well. *
    */ @InterfaceAudience.Private @@ -85,10 +81,10 @@ class AsyncProcess { public static final String PRIMARY_CALL_TIMEOUT_KEY = "hbase.client.primaryCallTimeout.multiget"; /** - * Configure the number of failures after which the client will start logging. A few failures - * is fine: region moved, then is not opened, then is overloaded. We try to have an acceptable - * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at - * this stage. + * Configure the number of failures after which the client will start logging. A few failures is + * fine: region moved, then is not opened, then is overloaded. We try to have an acceptable + * heuristic for the number of errors we don't log. 5 was chosen because we wait for 1s at this + * stage. */ public static final String START_LOG_ERRORS_AFTER_COUNT_KEY = "hbase.client.start.log.errors.counter"; @@ -156,8 +152,9 @@ public void waitUntilDone() throws InterruptedIOException { public static final String LOG_DETAILS_PERIOD = "hbase.client.log.detail.period.ms"; private static final int DEFAULT_LOG_DETAILS_PERIOD = 10000; private final int periodToLog; - AsyncProcess(ClusterConnection hc, Configuration conf, - RpcRetryingCallerFactory rpcCaller, RpcControllerFactory rpcFactory) { + + AsyncProcess(ClusterConnection hc, Configuration conf, RpcRetryingCallerFactory rpcCaller, + RpcControllerFactory rpcFactory) { if (hc == null) { throw new IllegalArgumentException("ClusterConnection cannot be null."); } @@ -166,8 +163,7 @@ public void waitUntilDone() throws InterruptedIOException { this.id = COUNTER.incrementAndGet(); - this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause); if (configuredPauseForCQTBE < pause) { LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: " @@ -179,7 +175,7 @@ public void waitUntilDone() throws InterruptedIOException { } // how many times we could try in total, one more than retry number this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1; + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1; this.primaryCallTimeoutMicroseconds = conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000); this.startLogErrorsCnt = conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT); @@ -204,18 +200,20 @@ public void waitUntilDone() throws InterruptedIOException { } /** - * The submitted task may be not accomplished at all if there are too many running tasks or - * other limits. + * The submitted task may be not accomplished at all if there are too many running tasks or other + * limits. * @param The class to cast the result * @param task The setting and data * @return AsyncRequestFuture */ - public AsyncRequestFuture submit(AsyncProcessTask task) throws InterruptedIOException { + public AsyncRequestFuture submit(AsyncProcessTask task) + throws InterruptedIOException { AsyncRequestFuture reqFuture = checkTask(task); if (reqFuture != null) { return reqFuture; } - SubmittedRows submittedRows = task.getSubmittedRows() == null ? SubmittedRows.ALL : task.getSubmittedRows(); + SubmittedRows submittedRows = + task.getSubmittedRows() == null ? 
SubmittedRows.ALL : task.getSubmittedRows(); switch (submittedRows) { case ALL: return submitAll(task); @@ -227,15 +225,14 @@ public AsyncRequestFuture submit(AsyncProcessTask task) throw } /** - * Extract from the rows list what we can submit. The rows we can not submit are kept in the - * list. Does not send requests to replicas (not currently used for anything other - * than streaming puts anyway). - * + * Extract from the rows list what we can submit. The rows we can not submit are kept in the list. + * Does not send requests to replicas (not currently used for anything other than streaming puts + * anyway). * @param task The setting and data * @param atLeastOne true if we should submit at least a subset. */ - private AsyncRequestFuture submit(AsyncProcessTask task, - boolean atLeastOne) throws InterruptedIOException { + private AsyncRequestFuture submit(AsyncProcessTask task, boolean atLeastOne) + throws InterruptedIOException { TableName tableName = task.getTableName(); RowAccess rows = task.getRowAccess(); Map actionsByServer = new HashMap<>(); @@ -265,8 +262,8 @@ private AsyncRequestFuture submit(AsyncProcessTask task, throw new IllegalArgumentException("#" + id + ", row cannot be null"); } // Make sure we get 0-s replica. - RegionLocations locs = connection.locateRegion( - tableName, r.getRow(), true, true, RegionReplicaUtil.DEFAULT_REPLICA_ID); + RegionLocations locs = connection.locateRegion(tableName, r.getRow(), true, true, + RegionReplicaUtil.DEFAULT_REPLICA_ID); if (locs == null || locs.isEmpty() || locs.getDefaultRegionLocation() == null) { throw new IOException("#" + id + ", no location found, aborting submit for" + " tableName=" + tableName + " rowkey=" + Bytes.toStringBinary(r.getRow())); @@ -312,21 +309,22 @@ private AsyncRequestFuture submit(AsyncProcessTask task, if (retainedActions.isEmpty()) return NO_REQS_RESULT; - return submitMultiActions(task, retainedActions, nonceGroup, - locationErrors, locationErrorRows, actionsByServer); + return submitMultiActions(task, retainedActions, nonceGroup, locationErrors, locationErrorRows, + actionsByServer); } AsyncRequestFuture submitMultiActions(AsyncProcessTask task, List retainedActions, long nonceGroup, List locationErrors, List locationErrorRows, Map actionsByServer) { - AsyncRequestFutureImpl ars = createAsyncRequestFuture(task, retainedActions, nonceGroup); + AsyncRequestFutureImpl ars = + createAsyncRequestFuture(task, retainedActions, nonceGroup); // Add location errors if any if (locationErrors != null) { for (int i = 0; i < locationErrors.size(); ++i) { int originalIndex = locationErrorRows.get(i); Row row = retainedActions.get(originalIndex).getAction(); - ars.manageError(originalIndex, row, - AsyncRequestFutureImpl.Retry.NO_LOCATION_PROBLEM, locationErrors.get(i), null); + ars.manageError(originalIndex, row, AsyncRequestFutureImpl.Retry.NO_LOCATION_PROBLEM, + locationErrors.get(i), null); } } ars.sendMultiAction(actionsByServer, 1, null, false); @@ -335,7 +333,6 @@ AsyncRequestFuture submitMultiActions(AsyncProcessTask task, /** * Helper that is used when grouping the actions per region server. 
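The submit() hunks above only re-wrap existing code, but the idea behind them is that rows are bucketed per destination RegionServer before one multi-request is sent to each server. A rough sketch of that bucketing step using plain JDK maps, with a hypothetical locate function standing in for ClusterConnection.locateRegion (the class, method and parameter names below are illustrative):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Row;

final class GroupByServerSketch {
  // In the real class the map value is a MultiAction keyed by region name; a
  // simple list per server is enough to show the shape of the grouping.
  static Map<ServerName, List<Row>> groupByServer(List<? extends Row> rows,
      Function<byte[], ServerName> locate) {
    Map<ServerName, List<Row>> actionsByServer = new HashMap<>();
    for (Row r : rows) {
      ServerName sn = locate.apply(r.getRow()); // the real code may retry or fail here
      actionsByServer.computeIfAbsent(sn, k -> new ArrayList<>()).add(r);
    }
    return actionsByServer;
  }
}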
- * * @param server - server * @param regionName - regionName * @param action - the action to add to the multiaction @@ -374,7 +371,8 @@ private AsyncRequestFuture submitAll(AsyncProcessTask task) { if (r instanceof Put) { Put put = (Put) r; if (put.isEmpty()) { - throw new IllegalArgumentException("No columns to insert for #" + (posInList+1)+ " item"); + throw new IllegalArgumentException( + "No columns to insert for #" + (posInList + 1) + " item"); } highestPriority = Math.max(put.getPriority(), highestPriority); } @@ -382,7 +380,8 @@ private AsyncRequestFuture submitAll(AsyncProcessTask task) { setNonce(ng, r, action); actions.add(action); } - AsyncRequestFutureImpl ars = createAsyncRequestFuture(task, actions, ng.getNonceGroup()); + AsyncRequestFutureImpl ars = + createAsyncRequestFuture(task, actions, ng.getNonceGroup()); ars.groupAndSendMultiAction(actions, 1); return ars; } @@ -425,11 +424,12 @@ private static boolean hasIncrementOrAppend(RowMutations mutations) { private int checkTimeout(String name, int timeout) { if (timeout < 0) { - throw new RuntimeException("The " + name + " must be bigger than zero," - + "current value is" + timeout); + throw new RuntimeException( + "The " + name + " must be bigger than zero," + "current value is" + timeout); } return timeout; } + private int checkOperationTimeout(int operationTimeout) { return checkTimeout("operation timeout", operationTimeout); } @@ -438,24 +438,24 @@ private int checkRpcTimeout(int rpcTimeout) { return checkTimeout("rpc timeout", rpcTimeout); } - AsyncRequestFutureImpl createAsyncRequestFuture( - AsyncProcessTask task, List actions, long nonceGroup) { + AsyncRequestFutureImpl createAsyncRequestFuture(AsyncProcessTask task, + List actions, long nonceGroup) { return new AsyncRequestFutureImpl<>(task, actions, nonceGroup, this); } /** Wait until the async does not have more than max tasks in progress. */ protected void waitForMaximumCurrentTasks(int max, TableName tableName) throws InterruptedIOException { - requestController.waitForMaximumCurrentTasks(max, id, periodToLog, - getLogger(tableName, max)); + requestController.waitForMaximumCurrentTasks(max, id, periodToLog, getLogger(tableName, max)); } private Consumer getLogger(TableName tableName, long max) { return (currentInProgress) -> { - LOG.info("#" + id + (max < 0 ? - ", waiting for any free slot" : - ", waiting for some tasks to finish. Expected max=" + max) + ", tasksInProgress=" - + currentInProgress + (tableName == null ? "" : ", tableName=" + tableName)); + LOG.info("#" + id + + (max < 0 ? ", waiting for any free slot" + : ", waiting for some tasks to finish. Expected max=" + max) + + ", tasksInProgress=" + currentInProgress + + (tableName == null ? "" : ", tableName=" + tableName)); }; } @@ -463,7 +463,6 @@ void incTaskCounters(Collection regions, ServerName sn) { requestController.incTaskCounters(regions, sn); } - void decTaskCounters(Collection regions, ServerName sn) { requestController.decTaskCounters(regions, sn); } @@ -471,25 +470,25 @@ void decTaskCounters(Collection regions, ServerName sn) { /** * Create a caller. Isolated to be easily overridden in the tests. */ - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { return rpcCallerFactory. newCaller(checkRpcTimeout(rpcTimeout)); } /** - * Creates the server error tracker to use inside process. 
- * Currently, to preserve the main assumption about current retries, and to work well with - * the retry-limit-based calculation, the calculation is local per Process object. - * We may benefit from connection-wide tracking of server errors. + * Creates the server error tracker to use inside process. Currently, to preserve the main + * assumption about current retries, and to work well with the retry-limit-based calculation, the + * calculation is local per Process object. We may benefit from connection-wide tracking of server + * errors. * @return ServerErrorTracker to use, null if there is no ServerErrorTracker on this connection */ ConnectionImplementation.ServerErrorTracker createServerErrorTracker() { - return new ConnectionImplementation.ServerErrorTracker( - this.serverTrackerTimeout, this.numTries); + return new ConnectionImplementation.ServerErrorTracker(this.serverTrackerTimeout, + this.numTries); } static boolean isReplicaGet(Row row) { - return (row instanceof Get) && (((Get)row).getConsistency() == Consistency.TIMELINE); + return (row instanceof Get) && (((Get) row).getConsistency() == Consistency.TIMELINE); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java index 5a2bbfebc08d..49739fd6efc8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcessTask.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,39 +15,36 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Iterator; import java.util.List; import java.util.concurrent.ExecutorService; import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; -import org.apache.hadoop.hbase.client.coprocessor.Batch; /** - * Contains the attributes of a task which will be executed - * by {@link org.apache.hadoop.hbase.client.AsyncProcess}. - * The attributes will be validated by AsyncProcess. - * It's intended for advanced client applications. + * Contains the attributes of a task which will be executed by + * {@link org.apache.hadoop.hbase.client.AsyncProcess}. The attributes will be validated by + * AsyncProcess. It's intended for advanced client applications. * @param The type of response from server-side */ @InterfaceAudience.Private @InterfaceStability.Evolving public class AsyncProcessTask { /** - * The number of processed rows. - * The AsyncProcess has traffic control which may reject some rows. + * The number of processed rows. The AsyncProcess has traffic control which may reject some rows. 
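Because AsyncProcessTask is described above as the entry point for advanced client applications, a hedged usage sketch may help orient readers. It relies only on newBuilder()/build(), AsyncProcess.submit() and the AsyncRequestFuture methods that appear in this patch; the individual builder setters named below are assumptions inferred from the task fields and are not shown in these hunks:

package org.apache.hadoop.hbase.client; // AsyncProcess is package-private

import java.io.IOException;
import java.util.List;
import java.util.concurrent.ExecutorService;
import org.apache.hadoop.hbase.TableName;

final class AsyncProcessUsageSketch {
  // Setter names (setPool, setTableName, setRowAccess, setSubmittedRows,
  // setNeedResults) are assumed; only newBuilder(), build(), submit(),
  // waitUntilDone(), hasError() and getErrors() appear verbatim in the patch.
  static void flushPuts(AsyncProcess ap, TableName table, ExecutorService pool, List<Put> puts)
      throws IOException {
    AsyncProcessTask task = AsyncProcessTask.newBuilder()
        .setPool(pool)
        .setTableName(table)
        .setRowAccess(puts)
        .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL)
        .setNeedResults(false)
        .build();
    AsyncRequestFuture future = ap.submit(task); // may block if too many tasks are in flight
    future.waitUntilDone();
    if (future.hasError()) {
      throw future.getErrors(); // RetriesExhaustedWithDetailsException is an IOException
    }
  }
}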
*/ public enum SubmittedRows { - ALL, - AT_LEAST_ONE, - NORMAL + ALL, AT_LEAST_ONE, NORMAL } + public static Builder newBuilder(final Batch.Callback callback) { return new Builder<>(callback); } + public static Builder newBuilder() { return new Builder(); } @@ -127,10 +123,11 @@ Builder setCallable(CancellableRegionServerCallable callable) { } public AsyncProcessTask build() { - return new AsyncProcessTask<>(pool, tableName, rows, submittedRows, - callback, callable, needResults, rpcTimeout, operationTimeout, results); + return new AsyncProcessTask<>(pool, tableName, rows, submittedRows, callback, callable, + needResults, rpcTimeout, operationTimeout, results); } } + private final ExecutorService pool; private final TableName tableName; private final RowAccess rows; @@ -141,16 +138,16 @@ public AsyncProcessTask build() { private final int rpcTimeout; private final int operationTimeout; private final Object[] results; + AsyncProcessTask(AsyncProcessTask task) { - this(task.getPool(), task.getTableName(), task.getRowAccess(), - task.getSubmittedRows(), task.getCallback(), task.getCallable(), - task.getNeedResults(), task.getRpcTimeout(), task.getOperationTimeout(), - task.getResults()); - } - AsyncProcessTask(ExecutorService pool, TableName tableName, - RowAccess rows, SubmittedRows size, Batch.Callback callback, - CancellableRegionServerCallable callable, boolean needResults, - int rpcTimeout, int operationTimeout, Object[] results) { + this(task.getPool(), task.getTableName(), task.getRowAccess(), task.getSubmittedRows(), + task.getCallback(), task.getCallable(), task.getNeedResults(), task.getRpcTimeout(), + task.getOperationTimeout(), task.getResults()); + } + + AsyncProcessTask(ExecutorService pool, TableName tableName, RowAccess rows, + SubmittedRows size, Batch.Callback callback, CancellableRegionServerCallable callable, + boolean needResults, int rpcTimeout, int operationTimeout, Object[] results) { this.pool = pool; this.tableName = tableName; this.rows = rows; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java index 09cae3571b1a..3b07fff483fa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocator.java @@ -45,12 +45,13 @@ import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.FutureUtils; -import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; -import org.apache.hbase.thirdparty.io.netty.util.Timeout; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer; +import org.apache.hbase.thirdparty.io.netty.util.Timeout; + /** * The asynchronous region locator. 
*/ @@ -75,7 +76,7 @@ class AsyncRegionLocator { } private CompletableFuture withTimeout(CompletableFuture future, long timeoutNs, - Supplier timeoutMsg) { + Supplier timeoutMsg) { if (future.isDone() || timeoutNs <= 0) { return future; } @@ -98,11 +99,8 @@ private boolean isMeta(TableName tableName) { return TableName.isMetaTableName(tableName); } - private CompletableFuture tracedLocationFuture( - Supplier> action, - Function> getRegionNames, - Supplier spanSupplier - ) { + private CompletableFuture tracedLocationFuture(Supplier> action, + Function> getRegionNames, Supplier spanSupplier) { final Span span = spanSupplier.get(); try (Scope scope = span.makeCurrent()) { CompletableFuture future = action.get(); @@ -126,50 +124,44 @@ static List getRegionNames(RegionLocations locs) { if (locs == null || locs.getRegionLocations() == null) { return Collections.emptyList(); } - return Arrays.stream(locs.getRegionLocations()) - .filter(Objects::nonNull) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .collect(Collectors.toList()); + return Arrays.stream(locs.getRegionLocations()).filter(Objects::nonNull) + .map(HRegionLocation::getRegion).map(RegionInfo::getRegionNameAsString) + .collect(Collectors.toList()); } static List getRegionNames(HRegionLocation location) { - return Optional.ofNullable(location) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .map(Collections::singletonList) - .orElseGet(Collections::emptyList); + return Optional.ofNullable(location).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).map(Collections::singletonList) + .orElseGet(Collections::emptyList); } CompletableFuture getRegionLocations(TableName tableName, byte[] row, - RegionLocateType type, boolean reload, long timeoutNs) { + RegionLocateType type, boolean reload, long timeoutNs) { final Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.getRegionLocations") - .setTableName(tableName); + .setName("AsyncRegionLocator.getRegionLocations").setTableName(tableName); return tracedLocationFuture(() -> { - CompletableFuture future = isMeta(tableName) ? - metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) : - nonMetaRegionLocator.getRegionLocations(tableName, row, - RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload); + CompletableFuture future = isMeta(tableName) + ? 
metaRegionLocator.getRegionLocations(RegionReplicaUtil.DEFAULT_REPLICA_ID, reload) + : nonMetaRegionLocator.getRegionLocations(tableName, row, + RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload); return withTimeout(future, timeoutNs, - () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + - "ms) waiting for region locations for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "'"); + () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + "ms) waiting for region locations for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "'"); }, AsyncRegionLocator::getRegionNames, supplier); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - int replicaId, RegionLocateType type, boolean reload, long timeoutNs) { + int replicaId, RegionLocateType type, boolean reload, long timeoutNs) { final Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.getRegionLocation") - .setTableName(tableName); + .setName("AsyncRegionLocator.getRegionLocation").setTableName(tableName); return tracedLocationFuture(() -> { // meta region can not be split right now so we always call the same method. // Change it later if the meta table can have more than one regions. CompletableFuture future = new CompletableFuture<>(); CompletableFuture locsFuture = - isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) : - nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload); + isMeta(tableName) ? metaRegionLocator.getRegionLocations(replicaId, reload) + : nonMetaRegionLocator.getRegionLocations(tableName, row, replicaId, type, reload); addListener(locsFuture, (locs, error) -> { if (error != null) { future.completeExceptionally(error); @@ -177,38 +169,38 @@ CompletableFuture getRegionLocation(TableName tableName, byte[] } HRegionLocation loc = locs.getRegionLocation(replicaId); if (loc == null) { - future.completeExceptionally( - new RegionOfflineException("No location for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "', locateType=" + type + ", replicaId=" + replicaId)); + future.completeExceptionally(new RegionOfflineException( + "No location for " + tableName + ", row='" + Bytes.toStringBinary(row) + + "', locateType=" + type + ", replicaId=" + replicaId)); } else if (loc.getServerName() == null) { future.completeExceptionally( - new RegionOfflineException("No server address listed for region '" + - loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) + - "', locateType=" + type + ", replicaId=" + replicaId)); + new RegionOfflineException("No server address listed for region '" + + loc.getRegion().getRegionNameAsString() + ", row='" + Bytes.toStringBinary(row) + + "', locateType=" + type + ", replicaId=" + replicaId)); } else { future.complete(loc); } }); return withTimeout(future, timeoutNs, - () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + - "ms) waiting for region location for " + tableName + ", row='" + - Bytes.toStringBinary(row) + "', replicaId=" + replicaId); + () -> "Timeout(" + TimeUnit.NANOSECONDS.toMillis(timeoutNs) + + "ms) waiting for region location for " + tableName + ", row='" + + Bytes.toStringBinary(row) + "', replicaId=" + replicaId); }, AsyncRegionLocator::getRegionNames, supplier); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - int replicaId, RegionLocateType type, long timeoutNs) { + int replicaId, RegionLocateType type, long timeoutNs) { return getRegionLocation(tableName, row, replicaId, type, false, 
timeoutNs); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - RegionLocateType type, boolean reload, long timeoutNs) { + RegionLocateType type, boolean reload, long timeoutNs) { return getRegionLocation(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID, type, reload, timeoutNs); } CompletableFuture getRegionLocation(TableName tableName, byte[] row, - RegionLocateType type, long timeoutNs) { + RegionLocateType type, long timeoutNs) { return getRegionLocation(tableName, row, type, false, timeoutNs); } @@ -221,9 +213,8 @@ void updateCachedLocationOnError(HRegionLocation loc, Throwable exception) { } void clearCache(TableName tableName) { - Supplier supplier = new TableSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache") - .setTableName(tableName); + Supplier supplier = + new TableSpanBuilder(conn).setName("AsyncRegionLocator.clearCache").setTableName(tableName); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", tableName); if (tableName.equals(META_TABLE_NAME)) { @@ -235,9 +226,9 @@ void clearCache(TableName tableName) { } void clearCache(ServerName serverName) { - Supplier supplier = new ConnectionSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache") - .addAttribute(SERVER_NAME_KEY, serverName.getServerName()); + Supplier supplier = + new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache") + .addAttribute(SERVER_NAME_KEY, serverName.getServerName()); TraceUtil.trace(() -> { LOG.debug("Clear meta cache for {}", serverName); metaRegionLocator.clearCache(serverName); @@ -247,8 +238,8 @@ void clearCache(ServerName serverName) { } void clearCache() { - Supplier supplier = new ConnectionSpanBuilder(conn) - .setName("AsyncRegionLocator.clearCache"); + Supplier supplier = + new ConnectionSpanBuilder(conn).setName("AsyncRegionLocator.clearCache"); TraceUtil.trace(() -> { metaRegionLocator.clearCache(); nonMetaRegionLocator.clearCache(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java index 4c6cd5a01172..3f79d040939b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRegionLocatorHelper.java @@ -19,6 +19,7 @@ import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.findException; import static org.apache.hadoop.hbase.exceptions.ClientExceptionsUtil.isMetaClearingException; + import java.util.Arrays; import java.util.function.Consumer; import java.util.function.Function; @@ -50,8 +51,8 @@ static boolean canUpdateOnError(HRegionLocation loc, HRegionLocation oldLoc) { if (oldLoc == null || oldLoc.getServerName() == null) { return false; } - return oldLoc.getSeqNum() <= loc.getSeqNum() && - oldLoc.getServerName().equals(loc.getServerName()); + return oldLoc.getSeqNum() <= loc.getSeqNum() + && oldLoc.getServerName().equals(loc.getServerName()); } static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception, @@ -79,7 +80,7 @@ static void updateCachedLocationOnError(HRegionLocation loc, Throwable exception if (cause instanceof RegionMovedException) { RegionMovedException rme = (RegionMovedException) cause; HRegionLocation newLoc = - new HRegionLocation(loc.getRegion(), rme.getServerName(), rme.getLocationSeqNum()); + new HRegionLocation(loc.getRegion(), rme.getServerName(), rme.getLocationSeqNum()); LOG.debug("Try updating {} with the new location 
{} constructed by {}", loc, newLoc, rme.toString()); addToCache.accept(newLoc); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java index b91e094d340d..cbad11ffd985 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFuture.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,10 +17,9 @@ */ package org.apache.hadoop.hbase.client; -import org.apache.yetus.audience.InterfaceAudience; - import java.io.InterruptedIOException; import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; /** * The context used to wait for results from one submit call. If submit call is made with @@ -31,9 +29,13 @@ @InterfaceAudience.Private public interface AsyncRequestFuture { public boolean hasError(); + public RetriesExhaustedWithDetailsException getErrors(); + public List getFailedOperations(); + public Object[] getResults() throws InterruptedIOException; + /** Wait until all tasks are executed, successfully or not. */ public void waitUntilDone() throws InterruptedIOException; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java index ca6d5342d57a..b1996ff0e54b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -54,13 +52,13 @@ import org.slf4j.LoggerFactory; /** - * The context, and return value, for a single submit/submitAll call. - * Note on how this class (one AP submit) works. Initially, all requests are split into groups - * by server; request is sent to each server in parallel; the RPC calls are not async so a - * thread per server is used. Every time some actions fail, regions/locations might have - * changed, so we re-group them by server and region again and send these groups in parallel - * too. The result, in case of retries, is a "tree" of threads, with parent exiting after - * scheduling children. This is why lots of code doesn't require any synchronization. + * The context, and return value, for a single submit/submitAll call. Note on how this class (one AP + * submit) works. Initially, all requests are split into groups by server; request is sent to each + * server in parallel; the RPC calls are not async so a thread per server is used. Every time some + * actions fail, regions/locations might have changed, so we re-group them by server and region + * again and send these groups in parallel too. The result, in case of retries, is a "tree" of + * threads, with parent exiting after scheduling children. This is why lots of code doesn't require + * any synchronization. 
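The AsyncRequestFuture interface reformatted above is what a submit() call hands back. A short consumption sketch, using only the methods declared in that interface plus the public accessors of RetriesExhaustedWithDetailsException (the wrapper class is illustrative):

import java.io.InterruptedIOException;
import org.apache.hadoop.hbase.client.AsyncRequestFuture;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;

final class RequestFutureInspectionSketch {
  static void report(AsyncRequestFuture future) throws InterruptedIOException {
    future.waitUntilDone(); // block until every per-server task has settled
    if (future.hasError()) {
      RetriesExhaustedWithDetailsException errors = future.getErrors();
      for (int i = 0; i < errors.getNumExceptions(); i++) {
        System.out.println("failed row " + errors.getRow(i) + ": " + errors.getCause(i));
      }
      System.out.println(future.getFailedOperations().size() + " operations failed in total");
    } else {
      Object[] results = future.getResults(); // may be null when results were not requested
      System.out.println((results == null ? 0 : results.length) + " results available");
    }
  }
}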
*/ @InterfaceAudience.Private class AsyncRequestFutureImpl implements AsyncRequestFuture { @@ -70,11 +68,11 @@ class AsyncRequestFutureImpl implements AsyncRequestFuture { private RetryingTimeTracker tracker; /** - * Runnable (that can be submitted to thread pool) that waits for when it's time - * to issue replica calls, finds region replicas, groups the requests by replica and - * issues the calls (on separate threads, via sendMultiAction). - * This is done on a separate thread because we don't want to wait on user thread for - * our asynchronous call, and usually we have to wait before making replica calls. + * Runnable (that can be submitted to thread pool) that waits for when it's time to issue replica + * calls, finds region replicas, groups the requests by replica and issues the calls (on separate + * threads, via sendMultiAction). This is done on a separate thread because we don't want to wait + * on user thread for our asynchronous call, and usually we have to wait before making replica + * calls. */ private final class ReplicaCallIssuingRunnable implements Runnable { private final long startTime; @@ -129,7 +127,7 @@ public void run() { * @param actionsByServer The map by server to add it to. */ private void addReplicaActions(int index, Map actionsByServer, - List unknownReplicaActions) { + List unknownReplicaActions) { if (results[index] != null) return; // opportunistic. Never goes from non-null to null. Action action = initialActions.get(index); RegionLocations loc = findAllLocationsOrFail(action, true); @@ -152,36 +150,36 @@ private void addReplicaActions(int index, Map actionsBy Action replicaAction = new Action(action, i); if (locs[i] != null) { asyncProcess.addAction(locs[i].getServerName(), locs[i].getRegionInfo().getRegionName(), - replicaAction, actionsByServer, nonceGroup); + replicaAction, actionsByServer, nonceGroup); } else { unknownReplicaActions.add(replicaAction); } } } - private void addReplicaActionsAgain( - Action action, Map actionsByServer) { + private void addReplicaActionsAgain(Action action, + Map actionsByServer) { if (action.getReplicaId() == RegionReplicaUtil.DEFAULT_REPLICA_ID) { throw new AssertionError("Cannot have default replica here"); } HRegionLocation loc = getReplicaLocationOrFail(action); if (loc == null) return; - asyncProcess.addAction(loc.getServerName(), loc.getRegionInfo().getRegionName(), - action, actionsByServer, nonceGroup); + asyncProcess.addAction(loc.getServerName(), loc.getRegionInfo().getRegionName(), action, + actionsByServer, nonceGroup); } } /** - * Runnable (that can be submitted to thread pool) that submits MultiAction to a - * single server. The server call is synchronous, therefore we do it on a thread pool. + * Runnable (that can be submitted to thread pool) that submits MultiAction to a single server. + * The server call is synchronous, therefore we do it on a thread pool. 
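The SingleServerRequestRunnable javadoc above captures the threading model: the RPC itself is synchronous, so one runnable per destination server is handed to a pool. A generic sketch of that dispatch pattern, with a stand-in for the blocking multi call (nothing below is part of the patch):

import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.function.BiConsumer;
import org.apache.hadoop.hbase.ServerName;

final class PerServerDispatchSketch {
  // One task per destination server; each task performs the blocking call so
  // the submitting thread is free to return to the caller.
  static <A> void dispatch(ExecutorService pool, Map<ServerName, A> actionsByServer,
      BiConsumer<ServerName, A> blockingRpc) {
    actionsByServer.forEach(
      (server, multiAction) -> pool.submit(() -> blockingRpc.accept(server, multiAction)));
  }
}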
*/ final class SingleServerRequestRunnable implements Runnable { private final MultiAction multiAction; private final int numAttempt; private final ServerName server; private final Set callsInProgress; - SingleServerRequestRunnable( - MultiAction multiAction, int numAttempt, ServerName server, + + SingleServerRequestRunnable(MultiAction multiAction, int numAttempt, ServerName server, Set callsInProgress) { this.multiAction = multiAction; this.numAttempt = numAttempt; @@ -198,7 +196,8 @@ public void run() { if (callable == null) { callable = createCallable(server, tableName, multiAction); } - RpcRetryingCaller caller = asyncProcess.createCaller(callable,rpcTimeout); + RpcRetryingCaller caller = + asyncProcess.createCaller(callable, rpcTimeout); try { if (callsInProgress != null) { callsInProgress.add(callable); @@ -210,13 +209,14 @@ public void run() { } } catch (IOException e) { // The service itself failed . It may be an error coming from the communication - // layer, but, as well, a functional error raised by the server. + // layer, but, as well, a functional error raised by the server. receiveGlobalFailure(multiAction, server, numAttempt, e); return; } catch (Throwable t) { // This should not happen. Let's log & retry anyway. - LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." + - " Retrying. Server=" + server + ", tableName=" + tableName, t); + LOG.error("id=" + asyncProcess.id + ", caught throwable. Unexpected." + + " Retrying. Server=" + server + ", tableName=" + tableName, + t); receiveGlobalFailure(multiAction, server, numAttempt, t); return; } @@ -249,23 +249,22 @@ public void run() { private final ExecutorService pool; private final Set callsInProgress; - private final TableName tableName; private final AtomicLong actionsInProgress = new AtomicLong(-1); /** - * The lock controls access to results. It is only held when populating results where - * there might be several callers (eventual consistency gets). For other requests, - * there's one unique call going on per result index. + * The lock controls access to results. It is only held when populating results where there might + * be several callers (eventual consistency gets). For other requests, there's one unique call + * going on per result index. */ private final Object replicaResultLock = new Object(); /** - * Result array. Null if results are not needed. Otherwise, each index corresponds to - * the action index in initial actions submitted. For most request types, has null-s for - * requests that are not done, and result/exception for those that are done. - * For eventual-consistency gets, initially the same applies; at some point, replica calls - * might be started, and ReplicaResultState is put at the corresponding indices. The - * returning calls check the type to detect when this is the case. After all calls are done, - * ReplicaResultState-s are replaced with results for the user. + * Result array. Null if results are not needed. Otherwise, each index corresponds to the action + * index in initial actions submitted. For most request types, has null-s for requests that are + * not done, and result/exception for those that are done. For eventual-consistency gets, + * initially the same applies; at some point, replica calls might be started, and + * ReplicaResultState is put at the corresponding indices. The returning calls check the type to + * detect when this is the case. After all calls are done, ReplicaResultState-s are replaced with + * results for the user. 
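The results array contract described above is compact but easy to misread, so here is an illustrative decoding of a single slot. The enum and the name-based check are sketch-only, since ReplicaResultState is a private nested class:

final class ResultsSlotSketch {
  enum SlotState { PENDING, REPLICA_CALLS_IN_FLIGHT, FAILED, DONE }

  static SlotState classify(Object slot) {
    if (slot == null) {
      return SlotState.PENDING; // the action has not finished yet
    }
    if (slot instanceof Throwable) {
      return SlotState.FAILED; // terminal error recorded for this action
    }
    if ("ReplicaResultState".equals(slot.getClass().getSimpleName())) {
      return SlotState.REPLICA_CALLS_IN_FLIGHT; // replica (timeline) reads still racing
    }
    return SlotState.DONE; // a plain Result ready for the caller
  }
}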
*/ private final Object[] results; /** @@ -284,17 +283,15 @@ public void run() { * used to make logging more clear, we don't actually care why we don't retry. */ public enum Retry { - YES, - NO_LOCATION_PROBLEM, - NO_NOT_RETRIABLE, - NO_RETRIES_EXHAUSTED, - NO_OTHER_SUCCEEDED + YES, NO_LOCATION_PROBLEM, NO_NOT_RETRIABLE, NO_RETRIES_EXHAUSTED, NO_OTHER_SUCCEEDED } - /** Sync point for calls to multiple replicas for the same user request (Get). - * Created and put in the results array (we assume replica calls require results) when - * the replica calls are launched. See results for details of this process. - * POJO, all fields are public. To modify them, the object itself is locked. */ + /** + * Sync point for calls to multiple replicas for the same user request (Get). Created and put in + * the results array (we assume replica calls require results) when the replica calls are + * launched. See results for details of this process. POJO, all fields are public. To modify them, + * the object itself is locked. + */ private static class ReplicaResultState { public ReplicaResultState(int callCount) { this.callCount = callCount; @@ -302,8 +299,10 @@ public ReplicaResultState(int callCount) { /** Number of calls outstanding, or 0 if a call succeeded (even with others outstanding). */ int callCount; - /** Errors for which it is not decided whether we will report them to user. If one of the - * calls succeeds, we will discard the errors that may have happened in the other calls. */ + /** + * Errors for which it is not decided whether we will report them to user. If one of the calls + * succeeds, we will discard the errors that may have happened in the other calls. + */ BatchErrors replicaErrors = null; @Override @@ -312,8 +311,8 @@ public String toString() { } } - public AsyncRequestFutureImpl(AsyncProcessTask task, List actions, - long nonceGroup, AsyncProcess asyncProcess) { + public AsyncRequestFutureImpl(AsyncProcessTask task, List actions, long nonceGroup, + AsyncProcess asyncProcess) { this.pool = task.getPool(); this.callback = task.getCallback(); this.nonceGroup = nonceGroup; @@ -375,9 +374,9 @@ public AsyncRequestFutureImpl(AsyncProcessTask task, List actions, } else { this.replicaGetIndices = null; } - this.callsInProgress = !hasAnyReplicaGets ? null : - Collections.newSetFromMap( - new ConcurrentHashMap()); + this.callsInProgress = !hasAnyReplicaGets ? null + : Collections + .newSetFromMap(new ConcurrentHashMap()); this.asyncProcess = asyncProcess; this.errorsByServer = createServerErrorTracker(); this.errors = new BatchErrors(); @@ -393,14 +392,13 @@ protected Set getCallsInProgress() { return callsInProgress; } - SingleServerRequestRunnable createSingleServerRequest(MultiAction multiAction, int numAttempt, ServerName server, - Set callsInProgress) { + SingleServerRequestRunnable createSingleServerRequest(MultiAction multiAction, int numAttempt, + ServerName server, Set callsInProgress) { return new SingleServerRequestRunnable(multiAction, numAttempt, server, callsInProgress); } /** * Group a list of actions per region servers, and send them. 
- * * @param currentActions - the list of row to submit * @param numAttempt - the current numAttempt (first attempt is 1) */ @@ -431,7 +429,8 @@ void groupAndSendMultiAction(List currentActions, int numAttempt) { } } else { byte[] regionName = loc.getRegionInfo().getRegionName(); - AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, nonceGroup); + AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, + nonceGroup); } } boolean doStartReplica = (numAttempt == 1 && !isReplica && hasAnyReplicaGets); @@ -439,8 +438,8 @@ void groupAndSendMultiAction(List currentActions, int numAttempt) { if (!actionsByServer.isEmpty()) { // If this is a first attempt to group and send, no replicas, we need replica thread. - sendMultiAction(actionsByServer, numAttempt, (doStartReplica && !hasUnknown) - ? currentActions : null, numAttempt > 1 && !hasUnknown); + sendMultiAction(actionsByServer, numAttempt, + (doStartReplica && !hasUnknown) ? currentActions : null, numAttempt > 1 && !hasUnknown); } if (hasUnknown) { @@ -449,11 +448,11 @@ void groupAndSendMultiAction(List currentActions, int numAttempt) { HRegionLocation loc = getReplicaLocationOrFail(action); if (loc == null) continue; byte[] regionName = loc.getRegionInfo().getRegionName(); - AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, nonceGroup); + AsyncProcess.addAction(loc.getServerName(), regionName, action, actionsByServer, + nonceGroup); } if (!actionsByServer.isEmpty()) { - sendMultiAction( - actionsByServer, numAttempt, doStartReplica ? currentActions : null, true); + sendMultiAction(actionsByServer, numAttempt, doStartReplica ? currentActions : null, true); } } } @@ -478,23 +477,22 @@ private HRegionLocation getReplicaLocationOrFail(Action action) { } private void manageLocationError(Action action, Exception ex) { - String msg = "Cannot get replica " + action.getReplicaId() - + " location for " + action.getAction(); + String msg = + "Cannot get replica " + action.getReplicaId() + " location for " + action.getAction(); LOG.error(msg); if (ex == null) { ex = new IOException(msg); } - manageError(action.getOriginalIndex(), action.getAction(), - Retry.NO_LOCATION_PROBLEM, ex, null); + manageError(action.getOriginalIndex(), action.getAction(), Retry.NO_LOCATION_PROBLEM, ex, null); } private RegionLocations findAllLocationsOrFail(Action action, boolean useCache) { - if (action.getAction() == null) throw new IllegalArgumentException("#" + asyncProcess.id + - ", row cannot be null"); + if (action.getAction() == null) + throw new IllegalArgumentException("#" + asyncProcess.id + ", row cannot be null"); RegionLocations loc = null; try { - loc = asyncProcess.connection.locateRegion( - tableName, action.getAction().getRow(), useCache, true, action.getReplicaId()); + loc = asyncProcess.connection.locateRegion(tableName, action.getAction().getRow(), useCache, + true, action.getReplicaId()); } catch (IOException ex) { manageLocationError(action, ex); } @@ -502,15 +500,14 @@ private RegionLocations findAllLocationsOrFail(Action action, boolean useCache) } /** - * Send a multi action structure to the servers, after a delay depending on the attempt - * number. Asynchronous. - * + * Send a multi action structure to the servers, after a delay depending on the attempt number. + * Asynchronous. * @param actionsByServer the actions structured by regions * @param numAttempt the attempt number. * @param actionsForReplicaThread original actions for replica thread; null on non-first call. 
*/ - void sendMultiAction(Map actionsByServer, - int numAttempt, List actionsForReplicaThread, boolean reuseThread) { + void sendMultiAction(Map actionsByServer, int numAttempt, + List actionsForReplicaThread, boolean reuseThread) { // Run the last item on the same thread if we are already on a send thread. // We hope most of the time it will be the only item, so we can cut down on threads. int actionsRemaining = actionsByServer.size(); @@ -518,8 +515,8 @@ void sendMultiAction(Map actionsByServer, for (Map.Entry e : actionsByServer.entrySet()) { ServerName server = e.getKey(); MultiAction multiAction = e.getValue(); - Collection runnables = getNewMultiActionRunnable(server, multiAction, - numAttempt); + Collection runnables = + getNewMultiActionRunnable(server, multiAction, numAttempt); // make sure we correctly count the number of runnables before we try to reuse the send // thread, in case we had to split the request into different runnables because of backoff if (runnables.size() > actionsRemaining) { @@ -527,7 +524,8 @@ void sendMultiAction(Map actionsByServer, } // run all the runnables - // HBASE-17475: Do not reuse the thread after stack reach a certain depth to prevent stack overflow + // HBASE-17475: Do not reuse the thread after stack reach a certain depth to prevent stack + // overflow // for now, we use HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER to control the depth for (Runnable runnable : runnables) { if ((--actionsRemaining == 0) && reuseThread @@ -540,8 +538,9 @@ void sendMultiAction(Map actionsByServer, if (t instanceof RejectedExecutionException) { // This should never happen. But as the pool is provided by the end user, // let's secure this a little. - LOG.warn("id=" + asyncProcess.id + ", task rejected by pool. Unexpected." + - " Server=" + server.getServerName(), t); + LOG.warn("id=" + asyncProcess.id + ", task rejected by pool. Unexpected." 
+ " Server=" + + server.getServerName(), + t); } else { // see #HBASE-14359 for more details LOG.warn("Caught unexpected exception/error: ", t); @@ -561,16 +560,15 @@ void sendMultiAction(Map actionsByServer, } private Collection getNewMultiActionRunnable(ServerName server, - MultiAction multiAction, - int numAttempt) { + MultiAction multiAction, int numAttempt) { // no stats to manage, just do the standard action if (asyncProcess.connection.getStatisticsTracker() == null) { if (asyncProcess.connection.getConnectionMetrics() != null) { asyncProcess.connection.getConnectionMetrics().incrNormalRunners(); } asyncProcess.incTaskCounters(multiAction.getRegions(), server); - SingleServerRequestRunnable runnable = createSingleServerRequest( - multiAction, numAttempt, server, callsInProgress); + SingleServerRequestRunnable runnable = + createSingleServerRequest(multiAction, numAttempt, server, callsInProgress); // remove trace for runnable because HBASE-25373 and OpenTelemetry do not cover TraceRunnable return Collections.singletonList(runnable); @@ -593,14 +591,15 @@ private Collection getNewMultiActionRunnable(ServerName serv List toReturn = new ArrayList<>(actions.size()); for (DelayingRunner runner : actions.values()) { asyncProcess.incTaskCounters(runner.getActions().getRegions(), server); - Runnable runnable = createSingleServerRequest(runner.getActions(), numAttempt, server, callsInProgress); + Runnable runnable = + createSingleServerRequest(runner.getActions(), numAttempt, server, callsInProgress); // use a delay runner only if we need to sleep for some time if (runner.getSleepTime() > 0) { runner.setRunner(runnable); runnable = runner; if (asyncProcess.connection.getConnectionMetrics() != null) { asyncProcess.connection.getConnectionMetrics() - .incrDelayRunnersAndUpdateDelayInterval(runner.getSleepTime()); + .incrDelayRunnersAndUpdateDelayInterval(runner.getSleepTime()); } } else { if (asyncProcess.connection.getConnectionMetrics() != null) { @@ -617,14 +616,13 @@ private Collection getNewMultiActionRunnable(ServerName serv /** * @param server server location where the target region is hosted * @param regionName name of the region which we are going to write some data - * @return the amount of time the client should wait until it submit a request to the - * specified server and region + * @return the amount of time the client should wait until it submit a request to the specified + * server and region */ private Long getBackoff(ServerName server, byte[] regionName) { ServerStatisticTracker tracker = asyncProcess.connection.getStatisticsTracker(); ServerStatistics stats = tracker.getStats(server); - return asyncProcess.connection.getBackoffPolicy() - .getBackoffTime(server, regionName, stats); + return asyncProcess.connection.getBackoffPolicy().getBackoffTime(server, regionName, stats); } /** @@ -632,8 +630,8 @@ private Long getBackoff(ServerName server, byte[] regionName) { */ private void startWaitingForReplicaCalls(List actionsForReplicaThread) { long startTime = EnvironmentEdgeManager.currentTime(); - ReplicaCallIssuingRunnable replicaRunnable = new ReplicaCallIssuingRunnable( - actionsForReplicaThread, startTime); + ReplicaCallIssuingRunnable replicaRunnable = + new ReplicaCallIssuingRunnable(actionsForReplicaThread, startTime); if (asyncProcess.primaryCallTimeoutMicroseconds == 0) { // Start replica calls immediately. 
replicaRunnable.run(); @@ -650,18 +648,16 @@ private void startWaitingForReplicaCalls(List actionsForReplicaThread) { /** * Check that we can retry acts accordingly: logs, set the error status. - * * @param originalIndex the position in the list sent - * @param row the row - * @param canRetry if false, we won't retry whatever the settings. - * @param throwable the throwable, if any (can be null) - * @param server the location, if any (can be null) + * @param row the row + * @param canRetry if false, we won't retry whatever the settings. + * @param throwable the throwable, if any (can be null) + * @param server the location, if any (can be null) * @return true if the action can be retried, false otherwise. */ - Retry manageError(int originalIndex, Row row, Retry canRetry, - Throwable throwable, ServerName server) { - if (canRetry == Retry.YES - && throwable != null && throwable instanceof DoNotRetryIOException) { + Retry manageError(int originalIndex, Row row, Retry canRetry, Throwable throwable, + ServerName server) { + if (canRetry == Retry.YES && throwable != null && throwable instanceof DoNotRetryIOException) { canRetry = Retry.NO_NOT_RETRIABLE; } @@ -676,17 +672,15 @@ Retry manageError(int originalIndex, Row row, Retry canRetry, /** * Resubmit all the actions from this multiaction after a failure. - * - * @param rsActions the actions still to do from the initial list - * @param server the destination + * @param rsActions the actions still to do from the initial list + * @param server the destination * @param numAttempt the number of attempts so far * @param t the throwable (if any) that caused the resubmit */ - private void receiveGlobalFailure( - MultiAction rsActions, ServerName server, int numAttempt, Throwable t) { + private void receiveGlobalFailure(MultiAction rsActions, ServerName server, int numAttempt, + Throwable t) { errorsByServer.reportServerError(server); - Retry canRetry = errorsByServer.canTryMore(numAttempt) - ? Retry.YES : Retry.NO_RETRIES_EXHAUSTED; + Retry canRetry = errorsByServer.canTryMore(numAttempt) ? Retry.YES : Retry.NO_RETRIES_EXHAUSTED; cleanServerCache(server, t); int failed = 0; @@ -700,8 +694,8 @@ private void receiveGlobalFailure( updateCachedLocations(server, regionName, row, ClientExceptionsUtil.isMetaClearingException(t) ? null : t); for (Action action : e.getValue()) { - Retry retry = manageError( - action.getOriginalIndex(), action.getAction(), canRetry, t, server); + Retry retry = + manageError(action.getOriginalIndex(), action.getAction(), canRetry, t, server); if (retry == Retry.YES) { toReplay.add(action); } else if (retry == Retry.NO_OTHER_SUCCEEDED) { @@ -720,19 +714,19 @@ private void receiveGlobalFailure( } /** - * Log as much info as possible, and, if there is something to replay, - * submit it again after a back off sleep. + * Log as much info as possible, and, if there is something to replay, submit it again after a + * back off sleep. */ - private void resubmit(ServerName oldServer, List toReplay, - int numAttempt, int failureCount, Throwable throwable) { + private void resubmit(ServerName oldServer, List toReplay, int numAttempt, + int failureCount, Throwable throwable) { // We have something to replay. We're going to sleep a little before. // We have two contradicting needs here: - // 1) We want to get the new location after having slept, as it may change. - // 2) We want to take into account the location when calculating the sleep time. - // 3) If all this is just because the response needed to be chunked try again FAST. 
+ // 1) We want to get the new location after having slept, as it may change. + // 2) We want to take into account the location when calculating the sleep time. + // 3) If all this is just because the response needed to be chunked try again FAST. // It should be possible to have some heuristics to take the right decision. Short term, - // we go for one. + // we go for one. boolean retryImmediately = throwable instanceof RetryImmediatelyException; int nextAttemptNumber = retryImmediately ? numAttempt : numAttempt + 1; long backOffTime; @@ -746,9 +740,9 @@ private void resubmit(ServerName oldServer, List toReplay, } if (numAttempt > asyncProcess.startLogErrorsCnt) { // We use this value to have some logs when we have multiple failures, but not too many - // logs, as errors are to be expected when a region moves, splits and so on - LOG.info(createLog(numAttempt, failureCount, toReplay.size(), - oldServer, throwable, backOffTime, true, null, -1, -1)); + // logs, as errors are to be expected when a region moves, splits and so on + LOG.info(createLog(numAttempt, failureCount, toReplay.size(), oldServer, throwable, + backOffTime, true, null, -1, -1)); } try { @@ -756,7 +750,8 @@ private void resubmit(ServerName oldServer, List toReplay, Thread.sleep(backOffTime); } } catch (InterruptedException e) { - LOG.warn("#" + asyncProcess.id + ", not sent: " + toReplay.size() + " operations, " + oldServer, e); + LOG.warn( + "#" + asyncProcess.id + ", not sent: " + toReplay.size() + " operations, " + oldServer, e); Thread.currentThread().interrupt(); return; } @@ -764,12 +759,12 @@ private void resubmit(ServerName oldServer, List toReplay, groupAndSendMultiAction(toReplay, nextAttemptNumber); } - private void logNoResubmit(ServerName oldServer, int numAttempt, - int failureCount, Throwable throwable, int failed, int stopped) { + private void logNoResubmit(ServerName oldServer, int numAttempt, int failureCount, + Throwable throwable, int failed, int stopped) { if (failureCount != 0 || numAttempt > asyncProcess.startLogErrorsCnt + 1) { String timeStr = new Date(errorsByServer.getStartTrackingTime()).toString(); - String logMessage = createLog(numAttempt, failureCount, 0, oldServer, - throwable, -1, false, timeStr, failed, stopped); + String logMessage = createLog(numAttempt, failureCount, 0, oldServer, throwable, -1, false, + timeStr, failed, stopped); if (failed != 0) { // Only log final failures as warning LOG.warn(logMessage); @@ -781,11 +776,10 @@ private void logNoResubmit(ServerName oldServer, int numAttempt, /** * Called when we receive the result of a server query. - * - * @param multiAction - the multiAction we sent - * @param server - the location. It's used as a server name. - * @param responses - the response, if any - * @param numAttempt - the attempt + * @param multiAction - the multiAction we sent + * @param server - the location. It's used as a server name. + * @param responses - the response, if any + * @param numAttempt - the attempt */ private void receiveMultiAction(MultiAction multiAction, ServerName server, MultiResponse responses, int numAttempt) { @@ -794,8 +788,8 @@ private void receiveMultiAction(MultiAction multiAction, ServerName server, // Success or partial success // Analyze detailed results. We can still have individual failures to be redo. 
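// Hedged sketch (editor-added) of the backoff pattern resubmit() relies on: the pause grows
// with the attempt number and a little jitter is added so retrying clients do not stampede the
// same server. This is a generic illustration, not the exact ConnectionUtils formula.
import java.util.concurrent.ThreadLocalRandom;

final class BackoffSketch {
  static long backoffMillis(long basePauseMs, int attempt) {
    // cap the exponent so the sleep does not overflow or grow without bound
    int capped = Math.min(attempt, 10);
    long pause = basePauseMs * (1L << capped);
    // add up to ~10% jitter on top of the exponential pause
    long jitter = (long) (pause * 0.1 * ThreadLocalRandom.current().nextDouble());
    return pause + jitter;
  }
}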
// two specific throwables are managed: - // - DoNotRetryIOException: we continue to retry for other actions - // - RegionMovedException: we update the cache with the new region location + // - DoNotRetryIOException: we continue to retry for other actions + // - RegionMovedException: we update the cache with the new region location Map results = responses.getResults(); List toReplay = new ArrayList<>(); Throwable lastException = null; @@ -813,15 +807,14 @@ private void receiveMultiAction(MultiAction multiAction, ServerName server, } Map regionResults = - results.containsKey(regionName) ? results.get(regionName).result : Collections.emptyMap(); + results.containsKey(regionName) ? results.get(regionName).result : Collections.emptyMap(); boolean regionFailureRegistered = false; for (Action sentAction : regionEntry.getValue()) { Object result = regionResults.get(sentAction.getOriginalIndex()); if (result == null) { if (regionException == null) { LOG.error("Server sent us neither results nor exceptions for " - + Bytes.toStringBinary(regionName) - + ", numAttempt:" + numAttempt); + + Bytes.toStringBinary(regionName) + ", numAttempt:" + numAttempt); regionException = new RuntimeException("Invalid response"); } // If the row operation encounters the region-lever error, the exception of action may be @@ -833,7 +826,7 @@ private void receiveMultiAction(MultiAction multiAction, ServerName server, Throwable actionException = (Throwable) result; Row row = sentAction.getAction(); lastException = regionException != null ? regionException - : ClientExceptionsUtil.findException(actionException); + : ClientExceptionsUtil.findException(actionException); // Register corresponding failures once per server/once per region. if (!regionFailureRegistered) { regionFailureRegistered = true; @@ -842,12 +835,10 @@ private void receiveMultiAction(MultiAction multiAction, ServerName server, if (retry == null) { errorsByServer.reportServerError(server); // We determine canRetry only once for all calls, after reporting server failure. - retry = errorsByServer.canTryMore(numAttempt) ? - Retry.YES : Retry.NO_RETRIES_EXHAUSTED; + retry = errorsByServer.canTryMore(numAttempt) ? Retry.YES : Retry.NO_RETRIES_EXHAUSTED; } ++failureCount; - switch (manageError(sentAction.getOriginalIndex(), row, retry, actionException, - server)) { + switch (manageError(sentAction.getOriginalIndex(), row, retry, actionException, server)) { case YES: toReplay.add(sentAction); break; @@ -872,13 +863,13 @@ private void receiveMultiAction(MultiAction multiAction, ServerName server, } private void updateCachedLocations(ServerName server, byte[] regionName, byte[] row, - Throwable rowException) { + Throwable rowException) { if (tableName == null) { return; } try { - asyncProcess.connection - .updateCachedLocations(tableName, regionName, row, rowException, server); + asyncProcess.connection.updateCachedLocations(tableName, regionName, row, rowException, + server); } catch (Throwable ex) { // That should never happen, but if it did, we want to make sure // we still process errors @@ -889,12 +880,13 @@ private void updateCachedLocations(ServerName server, byte[] regionName, byte[] private void invokeCallBack(byte[] regionName, byte[] row, CResult result) { if (callback != null) { try { - //noinspection unchecked + // noinspection unchecked // TODO: would callback expect a replica region name if it gets one? 
this.callback.update(regionName, row, result); } catch (Throwable t) { - LOG.error("User callback threw an exception for " - + Bytes.toStringBinary(regionName) + ", ignoring", t); + LOG.error( + "User callback threw an exception for " + Bytes.toStringBinary(regionName) + ", ignoring", + t); } } } @@ -913,16 +905,16 @@ protected void updateStats(ServerName server, MultiResponse resp) { } private String createLog(int numAttempt, int failureCount, int replaySize, ServerName sn, - Throwable error, long backOffTime, boolean willRetry, String startTime, - int failed, int stopped) { + Throwable error, long backOffTime, boolean willRetry, String startTime, int failed, + int stopped) { StringBuilder sb = new StringBuilder(); - sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName). - append(", attempt=").append(numAttempt).append("/").append(asyncProcess.numTries). - append(", "); + sb.append("id=").append(asyncProcess.id).append(", table=").append(tableName) + .append(", attempt=").append(numAttempt).append("/").append(asyncProcess.numTries) + .append(", "); - if (failureCount > 0 || error != null){ - sb.append("failureCount=").append(failureCount).append("ops").append(", last exception="). - append(error); + if (failureCount > 0 || error != null) { + sb.append("failureCount=").append(failureCount).append("ops").append(", last exception=") + .append(error); } else { sb.append("succeeded"); } @@ -930,12 +922,12 @@ private String createLog(int numAttempt, int failureCount, int replaySize, Serve sb.append(" on ").append(sn).append(", tracking started ").append(startTime); if (willRetry) { - sb.append(", retrying after=").append(backOffTime).append("ms"). - append(", operationsToReplay=").append(replaySize); + sb.append(", retrying after=").append(backOffTime).append("ms") + .append(", operationsToReplay=").append(replaySize); } else if (failureCount > 0) { if (stopped > 0) { - sb.append("; NOT retrying, stopped=").append(stopped). - append(" because successful operation on other replica"); + sb.append("; NOT retrying, stopped=").append(stopped) + .append(" because successful operation on other replica"); } if (failed > 0) { sb.append("; NOT retrying, failed=").append(failed).append(" -- final attempt!"); @@ -1011,7 +1003,8 @@ private void setError(int index, Row row, Throwable throwable, ServerName server boolean isActionDone = false; synchronized (state) { switch (state.callCount) { - case 0: return; // someone already set the result + case 0: + return; // someone already set the result case 1: { // All calls failed, we are the last error. target = errors; isActionDone = true; @@ -1045,9 +1038,9 @@ private void setError(int index, Row row, Throwable throwable, ServerName server } /** - * Checks if the action is complete; used on error to prevent needless retries. - * Does not synchronize, assuming element index/field accesses are atomic. - * This is an opportunistic optimization check, doesn't have to be strict. + * Checks if the action is complete; used on error to prevent needless retries. Does not + * synchronize, assuming element index/field accesses are atomic. This is an opportunistic + * optimization check, doesn't have to be strict. * @param index Original action index. * @param row Original request. 
*/ @@ -1055,15 +1048,15 @@ private boolean isActionComplete(int index, Row row) { if (!AsyncProcess.isReplicaGet(row)) return false; Object resObj = results[index]; return (resObj != null) && (!(resObj instanceof ReplicaResultState) - || ((ReplicaResultState)resObj).callCount == 0); + || ((ReplicaResultState) resObj).callCount == 0); } /** * Tries to set the result or error for a particular action as if there were no replica calls. * @return null if successful; replica state if there were in fact replica calls. */ - private ReplicaResultState trySetResultSimple(int index, Row row, boolean isError, - Object result, ServerName server, boolean isFromReplica) { + private ReplicaResultState trySetResultSimple(int index, Row row, boolean isError, Object result, + ServerName server, boolean isFromReplica) { Object resObj = null; if (!AsyncProcess.isReplicaGet(row)) { if (isFromReplica) { @@ -1083,10 +1076,10 @@ private ReplicaResultState trySetResultSimple(int index, Row row, boolean isErro } ReplicaResultState rrs = - (resObj instanceof ReplicaResultState) ? (ReplicaResultState)resObj : null; + (resObj instanceof ReplicaResultState) ? (ReplicaResultState) resObj : null; if (rrs == null && isError) { // The resObj is not replica state (null or already set). - errors.add((Throwable)result, row, server); + errors.add((Throwable) result, row, server); } if (resObj == null) { @@ -1155,7 +1148,7 @@ public void waitUntilDone() throws InterruptedIOException { } } - private boolean waitUntilDone(long cutoff) throws InterruptedException{ + private boolean waitUntilDone(long cutoff) throws InterruptedException { boolean hasWait = cutoff != Long.MAX_VALUE; long lastLog = EnvironmentEdgeManager.currentTime(); long currentInProgress; @@ -1206,15 +1199,15 @@ public Object[] getResults() throws InterruptedIOException { } /** - * Creates the server error tracker to use inside process. - * Currently, to preserve the main assumption about current retries, and to work well with - * the retry-limit-based calculation, the calculation is local per Process object. - * We may benefit from connection-wide tracking of server errors. + * Creates the server error tracker to use inside process. Currently, to preserve the main + * assumption about current retries, and to work well with the retry-limit-based calculation, the + * calculation is local per Process object. We may benefit from connection-wide tracking of server + * errors. 
* @return ServerErrorTracker to use, null if there is no ServerErrorTracker on this connection */ private ConnectionImplementation.ServerErrorTracker createServerErrorTracker() { - return new ConnectionImplementation.ServerErrorTracker( - asyncProcess.serverTrackerTimeout, asyncProcess.numTries); + return new ConnectionImplementation.ServerErrorTracker(asyncProcess.serverTrackerTimeout, + asyncProcess.numTries); } /** @@ -1222,16 +1215,15 @@ private ConnectionImplementation.ServerErrorTracker createServerErrorTracker() { */ private MultiServerCallable createCallable(final ServerName server, TableName tableName, final MultiAction multi) { - return new MultiServerCallable(asyncProcess.connection, tableName, server, - multi, asyncProcess.rpcFactory.newController(), rpcTimeout, tracker, multi.getPriority()); + return new MultiServerCallable(asyncProcess.connection, tableName, server, multi, + asyncProcess.rpcFactory.newController(), rpcTimeout, tracker, multi.getPriority()); } private void updateResult(int index, Object result) { Object current = results[index]; if (current != null) { if (LOG.isDebugEnabled()) { - LOG.debug("The result is assigned repeatedly! current:" + current - + ", new:" + result); + LOG.debug("The result is assigned repeatedly! current:" + current + ", new:" + result); } } results[index] = result; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java index 586e7d52e074..03fb32648590 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCaller.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.client.ConnectionUtils.SLEEP_DELTA_NS; @@ -168,14 +166,15 @@ protected final void onError(Throwable t, Supplier errMsg, return; } if (tries > startLogErrorsCnt) { - LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts + - ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + - " ms, time elapsed = " + elapsedMs() + " ms", error); + LOG.warn(errMsg.get() + ", tries = " + tries + ", maxAttempts = " + maxAttempts + + ", timeout = " + TimeUnit.NANOSECONDS.toMillis(operationTimeoutNs) + + " ms, time elapsed = " + elapsedMs() + " ms", + error); } updateCachedLocation.accept(error); RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(error, - EnvironmentEdgeManager.currentTime(), ""); + new RetriesExhaustedException.ThrowableWithExtraContext(error, + EnvironmentEdgeManager.currentTime(), ""); exceptions.add(qt); if (tries >= maxAttempts) { completeExceptionally(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java index 48bde4434be7..2743fabe5890 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRpcRetryingCallerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -93,8 +93,8 @@ public SingleRequestCallerBuilder row(byte[] row) { return this; } - public SingleRequestCallerBuilder action( - AsyncSingleRequestRpcRetryingCaller.Callable callable) { + public SingleRequestCallerBuilder + action(AsyncSingleRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -156,8 +156,8 @@ private void preCheck() { public AsyncSingleRequestRpcRetryingCaller build() { preCheck(); return new AsyncSingleRequestRpcRetryingCaller<>(retryTimer, conn, tableName, row, replicaId, - locateType, callable, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + locateType, callable, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, + rpcTimeoutNs, startLogErrorsCnt); } /** @@ -291,9 +291,9 @@ private void preCheck() { public AsyncScanSingleRegionRpcRetryingCaller build() { preCheck(); return new AsyncScanSingleRegionRpcRetryingCaller(retryTimer, conn, scan, scanMetrics, - scannerId, resultCache, consumer, stub, loc, isRegionServerRemote, priority, - scannerLeaseTimeoutPeriodNs, pauseNs, pauseForCQTBENs, maxAttempts, scanTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + scannerId, resultCache, consumer, stub, loc, isRegionServerRemote, priority, + scannerLeaseTimeoutPeriodNs, pauseNs, pauseForCQTBENs, maxAttempts, scanTimeoutNs, + rpcTimeoutNs, startLogErrorsCnt); } /** @@ -364,7 +364,7 @@ public BatchCallerBuilder startLogErrorsCnt(int startLogErrorsCnt) { public AsyncBatchRpcRetryingCaller build() { return new AsyncBatchRpcRetryingCaller<>(retryTimer, conn, tableName, actions, pauseNs, - pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); } public List> call() { @@ -385,8 +385,8 @@ public 
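// Hedged configuration sketch (editor-added): the tries/maxAttempts/operation-timeout/
// startLogErrorsCnt values logged by onError() above come from standard client settings such as
// these. Key names are believed correct for 2.x but should be treated as assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class RetryConfigSketch {
  static Configuration tunedRetries() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 10);          // retries2Attempts() derives maxAttempts
    conf.setLong("hbase.client.pause", 100L);                 // base pause between attempts, ms
    conf.setInt("hbase.client.operation.timeout", 120_000);   // whole-operation budget, ms
    conf.setInt("hbase.rpc.timeout", 60_000);                 // per-RPC budget, ms
    conf.setInt("hbase.client.start.log.errors.counter", 3);  // stay quiet for the first few tries
    return conf;
  }
}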
class MasterRequestCallerBuilder extends BuilderBase { private int priority = PRIORITY_UNSET; - public MasterRequestCallerBuilder action( - AsyncMasterRequestRpcRetryingCaller.Callable callable) { + public MasterRequestCallerBuilder + action(AsyncMasterRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -438,7 +438,8 @@ private void preCheck() { public AsyncMasterRequestRpcRetryingCaller build() { preCheck(); return new AsyncMasterRequestRpcRetryingCaller(retryTimer, conn, callable, priority, - pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, + startLogErrorsCnt); } /** @@ -466,8 +467,8 @@ public class AdminRequestCallerBuilder extends BuilderBase { private int priority; - public AdminRequestCallerBuilder action( - AsyncAdminRequestRetryingCaller.Callable callable) { + public AdminRequestCallerBuilder + action(AsyncAdminRequestRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -514,8 +515,8 @@ public AdminRequestCallerBuilder priority(int priority) { public AsyncAdminRequestRetryingCaller build() { return new AsyncAdminRequestRetryingCaller(retryTimer, conn, priority, pauseNs, - pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, - checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); + pauseForCQTBENs, maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, + checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); } public CompletableFuture call() { @@ -537,8 +538,8 @@ public class ServerRequestCallerBuilder extends BuilderBase { private ServerName serverName; - public ServerRequestCallerBuilder action( - AsyncServerRequestRpcRetryingCaller.Callable callable) { + public ServerRequestCallerBuilder + action(AsyncServerRequestRpcRetryingCaller.Callable callable) { this.callable = callable; return this; } @@ -580,8 +581,8 @@ public ServerRequestCallerBuilder serverName(ServerName serverName) { public AsyncServerRequestRpcRetryingCaller build() { return new AsyncServerRequestRpcRetryingCaller(retryTimer, conn, pauseNs, pauseForCQTBENs, - maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, - checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); + maxAttempts, operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt, + checkNotNull(serverName, "serverName is null"), checkNotNull(callable, "action is null")); } public CompletableFuture call() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java index 7f19180a0ab2..c316ef13c3bc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncScanSingleRegionRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -170,8 +170,8 @@ public ScanControllerImpl(Optional cursor) { private void preCheck() { Preconditions.checkState(Thread.currentThread() == callerThread, - "The current thread is %s, expected thread is %s, " + - "you should not call this method outside onNext or onHeartbeat", + "The current thread is %s, expected thread is %s, " + + "you should not call this method outside onNext or onHeartbeat", Thread.currentThread(), callerThread); Preconditions.checkState(state.equals(ScanControllerState.INITIALIZED), "Invalid Stopper state %s", state); @@ -201,7 +201,7 @@ ScanControllerState destroy() { @Override public Optional cursor() { - return cursor; + return cursor; } } @@ -352,9 +352,9 @@ private void closeScanner() { ScanRequest req = RequestConverter.buildScanRequest(this.scannerId, 0, true, false); stub.scan(controller, req, resp -> { if (controller.failed()) { - LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId + - " for " + loc.getRegion().getEncodedName() + " of " + - loc.getRegion().getTable() + " failed, ignore, probably already closed", + LOG.warn("Call to " + loc.getServerName() + " for closing scanner id = " + scannerId + + " for " + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, ignore, probably already closed", controller.getFailed()); } }); @@ -392,19 +392,19 @@ private void completeWhenError(boolean closeScanner) { private void onError(Throwable error) { error = translateException(error); if (tries > startLogErrorsCnt) { - LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " + - loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + - " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + - TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + - " ms", + LOG.warn("Call to " + loc.getServerName() + " for scanner id = " + scannerId + " for " + + loc.getRegion().getEncodedName() + " of " + loc.getRegion().getTable() + + " failed, , tries = " + tries + ", maxAttempts = " + maxAttempts + ", timeout = " + + TimeUnit.NANOSECONDS.toMillis(scanTimeoutNs) + " ms, time elapsed = " + elapsedMs() + + " ms", error); } - boolean scannerClosed = - error instanceof UnknownScannerException || error instanceof NotServingRegionException || - error instanceof RegionServerStoppedException || error instanceof ScannerResetException; + boolean scannerClosed = error instanceof UnknownScannerException + || error instanceof NotServingRegionException + || error instanceof RegionServerStoppedException || error instanceof ScannerResetException; RetriesExhaustedException.ThrowableWithExtraContext qt = - new RetriesExhaustedException.ThrowableWithExtraContext(error, - EnvironmentEdgeManager.currentTime(), ""); + new RetriesExhaustedException.ThrowableWithExtraContext(error, + EnvironmentEdgeManager.currentTime(), ""); exceptions.add(qt); if (tries >= maxAttempts) { completeExceptionally(!scannerClosed); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java index 52a2abe39440..97c915df9bf5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,8 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; /** - * Retry caller for a request call to region server. - * Now only used for coprocessor call to region server. + * Retry caller for a request call to region server. Now only used for coprocessor call to region + * server. */ @InterfaceAudience.Private public class AsyncServerRequestRpcRetryingCaller extends AsyncRpcRetryingCaller { @@ -49,7 +49,7 @@ public AsyncServerRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt, ServerName serverName, Callable callable) { super(retryTimer, conn, HConstants.NORMAL_QOS, pauseNs, pauseForCQTBENs, maxAttempts, - operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); + operationTimeoutNs, rpcTimeoutNs, startLogErrorsCnt); this.serverName = serverName; this.callable = callable; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java index 2a552c71b3dd..e9cbc3a02204 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncSingleRequestRpcRetryingCaller.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public AsyncSingleRequestRpcRetryingCaller(Timer retryTimer, AsyncConnectionImpl Callable callable, int priority, long pauseNs, long pauseForCQTBENs, int maxAttempts, long operationTimeoutNs, long rpcTimeoutNs, int startLogErrorsCnt) { super(retryTimer, conn, priority, pauseNs, pauseForCQTBENs, maxAttempts, operationTimeoutNs, - rpcTimeoutNs, startLogErrorsCnt); + rpcTimeoutNs, startLogErrorsCnt); this.tableName = tableName; this.row = row; this.replicaId = replicaId; @@ -73,8 +73,8 @@ private void call(HRegionLocation loc) { stub = conn.getRegionServerStub(loc.getServerName()); } catch (IOException e) { onError(e, - () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + - "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Get async stub to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + + "' in " + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } @@ -82,8 +82,8 @@ private void call(HRegionLocation loc) { addListener(callable.call(controller, loc, stub), (result, error) -> { if (error != null) { onError(error, - () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + - loc.getRegion().getEncodedName() + " of " + tableName + " failed", + () -> "Call to " + loc.getServerName() + " for '" + Bytes.toStringBinary(row) + "' in " + + loc.getRegion().getEncodedName() + " of " + tableName + " failed", err -> conn.getLocator().updateCachedLocationOnError(loc, err)); return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java index c7003e052377..07604e8b7aaa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -185,7 +185,7 @@ default CompletableFuture exists(Get get) { * {@link CompletableFuture}. */ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount) { + long amount) { return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL); } @@ -205,12 +205,12 @@ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, * {@link CompletableFuture}. */ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, byte[] qualifier, - long amount, Durability durability) { + long amount, Durability durability) { Preconditions.checkNotNull(row, "row is null"); Preconditions.checkNotNull(family, "family is null"); return increment( new Increment(row).addColumn(family, qualifier, amount).setDurability(durability)) - .thenApply(r -> Bytes.toLong(r.getValue(family, qualifier))); + .thenApply(r -> Bytes.toLong(r.getValue(family, qualifier))); } /** @@ -234,16 +234,15 @@ default CompletableFuture incrementColumnValue(byte[] row, byte[] family, * * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. 
*/ @Deprecated CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family); /** * A helper class for sending checkAndMutate request. - * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateBuilder { @@ -320,16 +319,15 @@ default CheckAndMutateBuilder ifEquals(byte[] value) { * * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter); /** * A helper class for sending checkAndMutate request with a filter. - * * @deprecated Since 2.4.0, will be removed in 4.0.0. For internal test use only, do not use it - * any more. + * any more. */ @Deprecated interface CheckAndMutateWithFilterBuilder { @@ -377,7 +375,7 @@ interface CheckAndMutateWithFilterBuilder { * @return A list of {@link CompletableFuture}s that represent the result for each CheckAndMutate. */ List> - checkAndMutate(List checkAndMutates); + checkAndMutate(List checkAndMutates); /** * A simple version of batch checkAndMutate. It will fail if there are any failures. @@ -385,7 +383,7 @@ interface CheckAndMutateWithFilterBuilder { * @return A {@link CompletableFuture} that wrapper the result list. */ default CompletableFuture> - checkAndMutateAll(List checkAndMutates) { + checkAndMutateAll(List checkAndMutates) { return allOf(checkAndMutate(checkAndMutates)); } @@ -481,7 +479,7 @@ default ResultScanner getScanner(byte[] family, byte[] qualifier) { */ default List> exists(List gets) { return get(toCheckExistenceOnly(gets)).stream() - .> map(f -> f.thenApply(r -> r.getExists())).collect(toList()); + .> map(f -> f.thenApply(r -> r.getExists())).collect(toList()); } /** @@ -589,7 +587,7 @@ default CompletableFuture> batchAll(List actions) { * @see ServiceCaller */ CompletableFuture coprocessorService(Function stubMaker, - ServiceCaller callable, byte[] row); + ServiceCaller callable, byte[] row); /** * The callback when we want to execute a coprocessor call on a range of regions. @@ -728,5 +726,5 @@ default CoprocessorServiceBuilder toRow(byte[] endKey) { * for more details. */ CoprocessorServiceBuilder coprocessorService(Function stubMaker, - ServiceCaller callable, CoprocessorCallback callback); + ServiceCaller callable, CoprocessorCallback callback); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java index 4c883a8332d7..ebaa33a3837e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
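// Hedged usage sketch (editor-added) for the AsyncTable methods whose javadoc is reformatted
// above: incrementColumnValue and the batch checkAndMutate overloads. Table, family and row
// names are illustrative assumptions.
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.apache.hadoop.hbase.util.Bytes;

final class AsyncTableSketch {
  static void example(AsyncConnection conn) {
    AsyncTable<AdvancedScanResultConsumer> table = conn.getTable(TableName.valueOf("example_table"));

    // Atomic counter bump; the returned future completes with the new value.
    CompletableFuture<Long> counter =
      table.incrementColumnValue(Bytes.toBytes("row-1"), Bytes.toBytes("f"), Bytes.toBytes("hits"), 1L);
    counter.thenAccept(v -> System.out.println("hits=" + v));

    // Batch check-and-mutate built with the non-deprecated CheckAndMutate API.
    CheckAndMutate cam = CheckAndMutate.newBuilder(Bytes.toBytes("row-2"))
      .ifNotExists(Bytes.toBytes("f"), Bytes.toBytes("q"))
      .build(new Put(Bytes.toBytes("row-2"))
        .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v")));
    CompletableFuture<List<CheckAndMutateResult>> results = table.checkAndMutateAll(Arrays.asList(cam));
    results.thenAccept(rs -> rs.forEach(r -> System.out.println("succeeded=" + r.isSuccess())));
  }
}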
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java index 399d9ddfaffe..554782b1175f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableBuilderBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.retries2Attempts; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -54,7 +53,7 @@ abstract class AsyncTableBuilderBase AsyncTableBuilderBase(TableName tableName, AsyncConnectionConfiguration connConf) { this.tableName = tableName; this.operationTimeoutNs = tableName.isSystemTable() ? connConf.getMetaOperationTimeoutNs() - : connConf.getOperationTimeoutNs(); + : connConf.getOperationTimeoutNs(); this.scanTimeoutNs = connConf.getScanTimeoutNs(); this.rpcTimeoutNs = connConf.getRpcTimeoutNs(); this.readRpcTimeoutNs = connConf.getReadRpcTimeoutNs(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java index 0ef6edf080c9..96d43acbbffe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static java.util.stream.Collectors.toList; + import com.google.protobuf.RpcChannel; import io.opentelemetry.context.Context; import java.io.IOException; @@ -177,8 +178,7 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) public CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) { return new CheckAndMutateWithFilterBuilder() { - private final CheckAndMutateWithFilterBuilder builder = - rawTable.checkAndMutate(row, filter); + private final CheckAndMutateWithFilterBuilder builder = rawTable.checkAndMutate(row, filter); @Override public CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange) { @@ -209,10 +209,9 @@ public CompletableFuture checkAndMutate(CheckAndMutate che } @Override - public List> checkAndMutate( - List checkAndMutates) { - return rawTable.checkAndMutate(checkAndMutates).stream() - .map(this::wrap).collect(toList()); + public List> + checkAndMutate(List checkAndMutates) { + return rawTable.checkAndMutate(checkAndMutates).stream().map(this::wrap).collect(toList()); } @Override @@ -303,7 +302,7 @@ public void onError(Throwable error) { } }; CoprocessorServiceBuilder builder = - rawTable.coprocessorService(stubMaker, callable, wrappedCallback); + rawTable.coprocessorService(stubMaker, callable, wrappedCallback); return new CoprocessorServiceBuilder() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java index 321f44e87b51..8ad51ea7efaf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -149,8 +149,8 @@ default CompletableFuture> getEndKeys() throws IOException { default CompletableFuture>> getStartEndKeys() throws IOException { return getAllRegionLocations().thenApply( locs -> locs.stream().filter(loc -> RegionReplicaUtil.isDefaultReplica(loc.getRegion())) - .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) - .collect(Collectors.toList())); + .map(HRegionLocation::getRegion).map(r -> Pair.newPair(r.getStartKey(), r.getEndKey())) + .collect(Collectors.toList())); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java index d5b275d2a77e..b74b1e8529ff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableRegionLocatorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public TableName getName() { @Override public CompletableFuture getRegionLocation(byte[] row, int replicaId, - boolean reload) { + boolean reload) { return conn.getLocator().getRegionLocation(tableName, row, replicaId, RegionLocateType.CURRENT, reload, -1L); } @@ -59,18 +59,18 @@ public CompletableFuture> getAllRegionLocations() { return tracedFuture(() -> { if (TableName.isMetaTableName(tableName)) { return conn.registry.getMetaRegionLocations() - .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); + .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); } return AsyncMetaTableAccessor - .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); + .getTableHRegionLocations(conn.getTable(TableName.META_TABLE_NAME), tableName); }, getClass().getSimpleName() + ".getAllRegionLocations"); } @Override public CompletableFuture> getRegionLocations(byte[] row, boolean reload) { return conn.getLocator() - .getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L) - .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); + .getRegionLocations(tableName, row, RegionLocateType.CURRENT, reload, -1L) + .thenApply(locs -> Arrays.asList(locs.getRegionLocations())); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java index 616cf6b9e601..02f0be02a3ae 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncTableResultScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
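// Hedged usage sketch (editor-added) for the AsyncTableRegionLocator methods reformatted above.
// The connection wiring and table name are illustrative assumptions.
import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTableRegionLocator;

final class RegionLocatorSketch {
  static void example(AsyncConnection conn) throws IOException {
    AsyncTableRegionLocator locator = conn.getRegionLocator(TableName.valueOf("example_table"));
    // All replica locations for the table.
    locator.getAllRegionLocations().thenAccept(locs -> locs.forEach(loc ->
      System.out.println(loc.getRegion().getEncodedName() + " on " + loc.getServerName())));
    // Start/end key pairs for default replicas only, as built by the default method above.
    locator.getStartEndKeys()
      .thenAccept(pairs -> System.out.println("regions (default replicas): " + pairs.size()));
  }
}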
See the NOTICE file * distributed with this work for additional information @@ -71,10 +71,10 @@ private void addToCache(Result result) { private void stopPrefetch(ScanController controller) { if (LOG.isDebugEnabled()) { - LOG.debug("{} stop prefetching when scanning {} as the cache size {}" + - " is greater than the maxCacheSize {}", - String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, - maxCacheSize); + LOG.debug( + "{} stop prefetching when scanning {} as the cache size {}" + + " is greater than the maxCacheSize {}", + String.format("0x%x", System.identityHashCode(this)), tableName, cacheSize, maxCacheSize); } resumer = controller.suspend(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java index d693cb329b30..c7ba64bd8c6d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Attributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Public public interface Attributes { /** - * Sets an attribute. - * In case value = null attribute is removed from the attributes map. - * Attribute names starting with _ indicate system attributes. + * Sets an attribute. In case value = null attribute is removed from the attributes map. Attribute + * names starting with _ indicate system attributes. * @param name attribute name * @param value attribute value */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java index 4e67bcedbd84..70a809da05bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceRequest.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,19 +34,15 @@ public final static class Builder { private boolean dryRun = false; private boolean ignoreRegionsInTransition = false; - private Builder() {} + private Builder() { + } /** - * Updates BalancerRequest to run the balancer in dryRun mode. - * In this mode, the balancer will try to find a plan but WILL NOT - * execute any region moves or call any coprocessors. - * - * You can run in dryRun mode regardless of whether the balancer switch - * is enabled or disabled, but dryRun mode will not run over an existing - * request or chore. - * - * Dry run is useful for testing out new balance configs. See the logs - * on the active HMaster for the results of the dry run. + * Updates BalancerRequest to run the balancer in dryRun mode. In this mode, the balancer will + * try to find a plan but WILL NOT execute any region moves or call any coprocessors. You can + * run in dryRun mode regardless of whether the balancer switch is enabled or disabled, but + * dryRun mode will not run over an existing request or chore. 
Dry run is useful for testing out + * new balance configs. See the logs on the active HMaster for the results of the dry run. */ public Builder setDryRun(boolean dryRun) { this.dryRun = dryRun; @@ -55,10 +50,8 @@ public Builder setDryRun(boolean dryRun) { } /** - * Updates BalancerRequest to run the balancer even if there are regions - * in transition. - * - * WARNING: Advanced usage only, this could cause more issues than it fixes. + * Updates BalancerRequest to run the balancer even if there are regions in transition. WARNING: + * Advanced usage only, this could cause more issues than it fixes. */ public Builder setIgnoreRegionsInTransition(boolean ignoreRegionsInTransition) { this.ignoreRegionsInTransition = ignoreRegionsInTransition; @@ -81,8 +74,8 @@ public static Builder newBuilder() { } /** - * Get a BalanceRequest for a default run of the balancer. The default mode executes - * any moves calculated and will not run if regions are already in transition. + * Get a BalanceRequest for a default run of the balancer. The default mode executes any moves + * calculated and will not run if regions are already in transition. */ public static BalanceRequest defaultInstance() { return DEFAULT; @@ -97,16 +90,16 @@ private BalanceRequest(boolean dryRun, boolean ignoreRegionsInTransition) { } /** - * Returns true if the balancer should run in dry run mode, otherwise false. In - * dry run mode, moves will be calculated but not executed. + * Returns true if the balancer should run in dry run mode, otherwise false. In dry run mode, + * moves will be calculated but not executed. */ public boolean isDryRun() { return dryRun; } /** - * Returns true if the balancer should execute even if regions are in transition, otherwise - * false. This is an advanced usage feature, as it can cause more issues than it fixes. + * Returns true if the balancer should execute even if regions are in transition, otherwise false. + * This is an advanced usage feature, as it can cause more issues than it fixes. */ public boolean isIgnoreRegionsInTransition() { return ignoreRegionsInTransition; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java index 143878209d11..c7914f150de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalanceResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +26,8 @@ public final class BalanceResponse { /** - * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance invocation to callers + * Used in HMaster to build a {@link BalanceResponse} for returning results of a balance + * invocation to callers */ @InterfaceAudience.Private public final static class Builder { @@ -35,13 +35,13 @@ public final static class Builder { private int movesCalculated; private int movesExecuted; - private Builder() {} + private Builder() { + } /** * Set true if the balancer ran, otherwise false. The balancer may not run in some - * circumstances, such as if a balance is already running or there are regions already - * in transition. - * + * circumstances, such as if a balance is already running or there are regions already in + * transition. 
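// Hedged usage sketch (editor-added) for the BalanceRequest builder documented above: a dry-run
// balance that computes a plan without moving regions. The Admin wiring is an illustrative
// assumption.
import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;

final class DryRunBalanceSketch {
  static BalanceResponse dryRun(Admin admin) throws IOException {
    BalanceRequest request = BalanceRequest.newBuilder()
      .setDryRun(true)                      // plan only, execute nothing
      .setIgnoreRegionsInTransition(false)  // keep the default safety behaviour
      .build();
    return admin.balance(request);
  }
}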
* @param balancerRan true if balancer ran, false otherwise */ public Builder setBalancerRan(boolean balancerRan) { @@ -52,7 +52,6 @@ public Builder setBalancerRan(boolean balancerRan) { /** * Set how many moves were calculated by the balancer. This will be zero if the cluster is * already balanced. - * * @param movesCalculated moves calculated by the balance run */ public Builder setMovesCalculated(int movesCalculated) { @@ -64,7 +63,6 @@ public Builder setMovesCalculated(int movesCalculated) { * Set how many of the calculated moves were actually executed by the balancer. This should be * zero if the balancer is run with {@link BalanceRequest#isDryRun()}. It may also not equal * movesCalculated if the balancer ran out of time while executing the moves. - * * @param movesExecuted moves executed by the balance run */ public Builder setMovesExecuted(int movesExecuted) { @@ -98,9 +96,9 @@ private BalanceResponse(boolean balancerRan, int movesCalculated, int movesExecu } /** - * Returns true if the balancer ran, otherwise false. The balancer may not run for a - * variety of reasons, such as: another balance is running, there are regions in - * transition, the cluster is in maintenance mode, etc. + * Returns true if the balancer ran, otherwise false. The balancer may not run for a variety of + * reasons, such as: another balance is running, there are regions in transition, the cluster is + * in maintenance mode, etc. */ public boolean isBalancerRan() { return balancerRan; @@ -115,10 +113,10 @@ public int getMovesCalculated() { } /** - * The number of moves actually executed by the balancer if it ran. This will be - * zero if {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} - * was true. It may also not be equal to {@link #getMovesCalculated()} if the balancer - * was interrupted midway through executing the moves due to max run time. + * The number of moves actually executed by the balancer if it ran. This will be zero if + * {@link #getMovesCalculated()} is zero or if {@link BalanceRequest#isDryRun()} was true. It may + * also not be equal to {@link #getMovesCalculated()} if the balancer was interrupted midway + * through executing the moves due to max run time. */ public int getMovesExecuted() { return movesExecuted; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java index e2bf2e28e0e7..1c22203b6dc1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerDecision.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. 
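To see the BalanceRequest/BalanceResponse pair above end to end, here is a minimal sketch of a dry-run balance invocation. It assumes the Admin#balance(BalanceRequest) overload that returns a BalanceResponse, introduced alongside these classes; everything else is ordinary client boilerplate.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalanceRequest;
import org.apache.hadoop.hbase.client.BalanceResponse;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DryRunBalanceSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
      Admin admin = connection.getAdmin()) {
      // Ask the balancer for a plan without executing any region moves.
      BalanceRequest request = BalanceRequest.newBuilder().setDryRun(true).build();
      BalanceResponse response = admin.balance(request);
      if (response.isBalancerRan()) {
        // In dry-run mode movesExecuted stays at zero; the plan itself is logged on the active HMaster.
        System.out.println("moves calculated: " + response.getMovesCalculated());
        System.out.println("moves executed:   " + response.getMovesExecuted());
      }
    }
  }
}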
*/ - package org.apache.hadoop.hbase.client; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -45,13 +42,12 @@ final public class BalancerDecision extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(BalancerDecision.class, (JsonSerializer) - (balancerDecision, type, jsonSerializationContext) -> { - Gson gson = new Gson(); - return gson.toJsonTree(balancerDecision); - }).create(); + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(BalancerDecision.class, + (JsonSerializer) (balancerDecision, type, jsonSerializationContext) -> { + Gson gson = new Gson(); + return gson.toJsonTree(balancerDecision); + }).create(); private BalancerDecision(String initialFunctionCosts, String finalFunctionCosts, double initTotalCost, double computedTotalCost, List regionPlans, @@ -90,14 +86,10 @@ public long getComputedSteps() { @Override public String toString() { - return new ToStringBuilder(this) - .append("initialFunctionCosts", initialFunctionCosts) - .append("finalFunctionCosts", finalFunctionCosts) - .append("initTotalCost", initTotalCost) - .append("computedTotalCost", computedTotalCost) - .append("computedSteps", computedSteps) - .append("regionPlans", regionPlans) - .toString(); + return new ToStringBuilder(this).append("initialFunctionCosts", initialFunctionCosts) + .append("finalFunctionCosts", finalFunctionCosts).append("initTotalCost", initTotalCost) + .append("computedTotalCost", computedTotalCost).append("computedSteps", computedSteps) + .append("regionPlans", regionPlans).toString(); } @Override @@ -144,8 +136,8 @@ public Builder setComputedSteps(long computedSteps) { } public BalancerDecision build() { - return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, - initTotalCost, computedTotalCost, regionPlans, computedSteps); + return new BalancerDecision(initialFunctionCosts, finalFunctionCosts, initTotalCost, + computedTotalCost, regionPlans, computedSteps); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java index d6e6cee20fc8..3bc114d7beec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BalancerRejection.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hbase.util.GsonUtil; import org.apache.yetus.audience.InterfaceAudience; @@ -37,27 +34,25 @@ @InterfaceAudience.Public @InterfaceStability.Evolving final public class BalancerRejection extends LogEntry { - //The reason why balancer was rejected + // The reason why balancer was rejected private final String reason; private final List costFuncInfoList; // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .disableHtmlEscaping() - .registerTypeAdapter(BalancerRejection.class, (JsonSerializer) - (balancerRejection, type, jsonSerializationContext) -> { - Gson gson = new Gson(); - return gson.toJsonTree(balancerRejection); - }).create(); + private static final Gson GSON = GsonUtil.createGson().setPrettyPrinting().disableHtmlEscaping() + .registerTypeAdapter(BalancerRejection.class, + (JsonSerializer) (balancerRejection, type, jsonSerializationContext) -> { + Gson gson = new Gson(); + return gson.toJsonTree(balancerRejection); + }) + .create(); private BalancerRejection(String reason, List costFuncInfoList) { this.reason = reason; - if(costFuncInfoList == null){ + if (costFuncInfoList == null) { this.costFuncInfoList = Collections.emptyList(); - } - else { + } else { this.costFuncInfoList = costFuncInfoList; } } @@ -72,10 +67,8 @@ public List getCostFuncInfoList() { @Override public String toString() { - return new ToStringBuilder(this) - .append("reason", reason) - .append("costFuncInfoList", costFuncInfoList.toString()) - .toString(); + return new ToStringBuilder(this).append("reason", reason) + .append("costFuncInfoList", costFuncInfoList.toString()).toString(); } @Override @@ -92,19 +85,15 @@ public Builder setReason(String reason) { return this; } - public void addCostFuncInfo(String funcName, double cost, float multiplier){ - if(costFuncInfoList == null){ + public void addCostFuncInfo(String funcName, double cost, float multiplier) { + if (costFuncInfoList == null) { costFuncInfoList = new ArrayList<>(); } - costFuncInfoList.add( - new StringBuilder() - .append(funcName) - .append(" cost:").append(cost) - .append(" multiplier:").append(multiplier) - .toString()); + costFuncInfoList.add(new StringBuilder().append(funcName).append(" cost:").append(cost) + .append(" multiplier:").append(multiplier).toString()); } - public Builder setCostFuncInfoList(List costFuncInfoList){ + public Builder setCostFuncInfoList(List costFuncInfoList) { this.costFuncInfoList = costFuncInfoList; return this; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java index d3cdc74fdf60..813f1c8b5f39 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchErrors.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
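The BalancerDecision and BalancerRejection entries above are normally consumed through the Admin log-entries API rather than constructed by hand. A hedged sketch follows; the Admin#getLogEntries parameter order (server names, log type, server type, limit, filter params) and the "BALANCER_DECISION" log type string are assumptions based on the documented ring-buffer log API, not something this patch defines.

import java.util.Collections;
import java.util.List;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BalancerDecision;
import org.apache.hadoop.hbase.client.LogEntry;
import org.apache.hadoop.hbase.client.ServerType;

public final class BalancerDecisionLogSketch {
  // Assumed signature: getLogEntries(serverNames, logType, serverType, limit, filterParams).
  static void printRecentBalancerDecisions(Admin admin) throws Exception {
    List<LogEntry> entries =
      admin.getLogEntries(null, "BALANCER_DECISION", ServerType.MASTER, 25, Collections.emptyMap());
    for (LogEntry entry : entries) {
      if (entry instanceof BalancerDecision) {
        System.out.println(entry.toJsonPrettyPrint());
      }
    }
  }
}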
*/ - package org.apache.hadoop.hbase.client; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.hbase.ServerName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; - class BatchErrors { private static final Logger LOG = LoggerFactory.getLogger(BatchErrors.class); final List throwables = new ArrayList<>(); @@ -33,7 +30,7 @@ class BatchErrors { final List addresses = new ArrayList<>(); public synchronized void add(Throwable ex, Row row, ServerName serverName) { - if (row == null){ + if (row == null) { throw new IllegalArgumentException("row cannot be null. location=" + serverName); } @@ -48,11 +45,10 @@ public boolean hasErrors() { synchronized RetriesExhaustedWithDetailsException makeException(boolean logDetails) { if (logDetails) { - LOG.error("Exception occurred! Exception details: " + throwables + ";\nActions: " - + actions); + LOG.error("Exception occurred! Exception details: " + throwables + ";\nActions: " + actions); } return new RetriesExhaustedWithDetailsException(new ArrayList<>(throwables), - new ArrayList<>(actions), new ArrayList<>(addresses)); + new ArrayList<>(actions), new ArrayList<>(addresses)); } public synchronized void clear() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java index 3b27298585e9..8cbb8dcec4bb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BatchScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,11 +24,10 @@ import java.util.ArrayList; import java.util.Deque; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache for batched scan, i.e, @@ -142,8 +141,8 @@ public Result[] addAndGet(Result[] results, boolean isHeartbeatMessage) throws I numberOfCompleteRows++; } // check if we have a row change - if (!partialResults.isEmpty() && - !Bytes.equals(partialResults.peek().getRow(), result.getRow())) { + if (!partialResults.isEmpty() + && !Bytes.equals(partialResults.peek().getRow(), result.getRow())) { regroupedResults.add(createCompletedResult()); } Result regroupedResult = regroupResults(result); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java index 7805f77e30e1..d638f8122a70 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,34 +25,38 @@ import org.apache.yetus.audience.InterfaceAudience; /** - *

    Used to communicate with a single HBase table similar to {@link Table} but meant for - * batched, asynchronous puts. Obtain an instance from a {@link Connection} and call - * {@link #close()} afterwards. Customizations can be applied to the {@code BufferedMutator} via - * the {@link BufferedMutatorParams}. + *

    + * Used to communicate with a single HBase table similar to {@link Table} but meant for batched, + * asynchronous puts. Obtain an instance from a {@link Connection} and call {@link #close()} + * afterwards. Customizations can be applied to the {@code BufferedMutator} via the + * {@link BufferedMutatorParams}. *

    - * - *

    Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}. - * The default implementation is to throw the exception upon receipt. This behavior can be - * overridden with a custom implementation, provided as a parameter with - * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}.

    - * - *

    Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs - * benefit from batching, but have no natural flush point. {@code BufferedMutator} receives the - * puts from the M/R job and will batch puts based on some heuristic, such as the accumulated size - * of the puts, and submit batches of puts asynchronously so that the M/R logic can continue - * without interruption. + *

    + * Exception handling with asynchronously via the {@link BufferedMutator.ExceptionListener}. The + * default implementation is to throw the exception upon receipt. This behavior can be overridden + * with a custom implementation, provided as a parameter with + * {@link BufferedMutatorParams#listener(BufferedMutator.ExceptionListener)}. *
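As a rough illustration of swapping in a custom listener as described above, the helper below builds BufferedMutatorParams whose listener logs each failed mutation and keeps going instead of rethrowing. The table name is a placeholder and the logging is deliberately simplistic; treat it as a sketch, not a recommended policy.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.util.Bytes;

public final class LoggingListenerSketch {
  static BufferedMutatorParams paramsWithLoggingListener(TableName table) {
    BufferedMutator.ExceptionListener listener =
      (RetriesExhaustedWithDetailsException e, BufferedMutator mutator) -> {
        // Log every failed action and carry on; the default listener would rethrow here.
        for (int i = 0; i < e.getNumExceptions(); i++) {
          System.err.println("Failed mutation on row "
            + Bytes.toStringBinary(e.getRow(i).getRow()) + ": " + e.getCause(i));
        }
      };
    return new BufferedMutatorParams(table).listener(listener);
  }
}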

    - * - *

    {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs - * will have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can - * also be effectively used in high volume online systems to batch puts, with the caveat that - * extreme circumstances, such as JVM or machine failure, may cause some data loss.

    - * - *

    NOTE: This class replaces the functionality that used to be available via + *

    + * Map/Reduce jobs are good use cases for using {@code BufferedMutator}. Map/reduce jobs benefit + * from batching, but have no natural flush point. {@code BufferedMutator} receives the puts from + * the M/R job and will batch puts based on some heuristic, such as the accumulated size of the + * puts, and submit batches of puts asynchronously so that the M/R logic can continue without + * interruption. + *

    + *

    + * {@code BufferedMutator} can also be used on more exotic circumstances. Map/Reduce batch jobs will + * have a single {@code BufferedMutator} per thread. A single {@code BufferedMutator} can also be + * effectively used in high volume online systems to batch puts, with the caveat that extreme + * circumstances, such as JVM or machine failure, may cause some data loss. + *

    + *

    + * NOTE: This class replaces the functionality that used to be available via * HTable#setAutoFlush(boolean) set to {@code false}. *

    - * - *

    See also the {@code BufferedMutatorExample} in the hbase-examples module.

    + *

    + * See also the {@code BufferedMutatorExample} in the hbase-examples module. + *
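Pulling the paragraphs above together, a minimal usage sketch looks roughly like the following; the table name, column family and payload are placeholders, and error handling is left to the default listener.

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public final class BufferedMutatorUsageSketch {
  public static void main(String[] args) throws IOException {
    TableName table = TableName.valueOf("example_table");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
      BufferedMutator mutator = connection.getBufferedMutator(table)) {
      // Optionally flush buffered mutations at least once a minute even if the buffer is not full.
      mutator.setWriteBufferPeriodicFlush(60_000L);
      for (int i = 0; i < 1_000; i++) {
        Put put = new Put(Bytes.toBytes("row-" + i));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        mutator.mutate(put); // buffered; sent to the server in batches
      }
      mutator.flush(); // push whatever is still sitting in the buffer
    } // close() flushes once more and releases resources
  }
}

The same pattern applies inside a map/reduce task: create the mutator once per task, call mutate() from map()/reduce(), and rely on flush()/close() in cleanup.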

    * @see ConnectionFactory * @see Connection * @since 1.0.0 @@ -66,8 +69,8 @@ public interface BufferedMutator extends Closeable { String CLASSNAME_KEY = "hbase.client.bufferedmutator.classname"; /** - * Having the timer tick run more often that once every 100ms is needless and will - * probably cause too many timer events firing having a negative impact on performance. + * Having the timer tick run more often that once every 100ms is needless and will probably cause + * too many timer events firing having a negative impact on performance. */ long MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = 100; @@ -79,25 +82,22 @@ public interface BufferedMutator extends Closeable { /** * Returns the {@link org.apache.hadoop.conf.Configuration} object used by this instance. *

    - * The reference returned is not a copy, so any change made to it will - * affect this instance. + * The reference returned is not a copy, so any change made to it will affect this instance. */ Configuration getConfiguration(); /** - * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the - * wire as part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. - * + * Sends a {@link Mutation} to the table. The mutations will be buffered and sent over the wire as + * part of a batch. Currently only supports {@link Put} and {@link Delete} mutations. * @param mutation The data to send. * @throws IOException if a remote or network exception occurs. */ void mutate(Mutation mutation) throws IOException; /** - * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the - * wire as part of a batch. There is no guarantee of sending entire content of {@code mutations} - * in a single batch; it will be broken up according to the write buffer capacity. - * + * Send some {@link Mutation}s to the table. The mutations will be buffered and sent over the wire + * as part of a batch. There is no guarantee of sending entire content of {@code mutations} in a + * single batch; it will be broken up according to the write buffer capacity. * @param mutations The data to send. * @throws IOException if a remote or network exception occurs. */ @@ -105,24 +105,22 @@ public interface BufferedMutator extends Closeable { /** * Performs a {@link #flush()} and releases any resources held. - * * @throws IOException if a remote or network exception occurs. */ @Override void close() throws IOException; /** - * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they - * are done. - * + * Executes all the buffered, asynchronous {@link Mutation} operations and waits until they are + * done. * @throws IOException if a remote or network exception occurs. */ void flush() throws IOException; /** * Sets the maximum time before the buffer is automatically flushed checking once per second. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. */ default void setWriteBufferPeriodicFlush(long timeoutMs) { setWriteBufferPeriodicFlush(timeoutMs, 1000L); @@ -130,16 +128,16 @@ default void setWriteBufferPeriodicFlush(long timeoutMs) { /** * Sets the maximum time before the buffer is automatically flushed. - * @param timeoutMs The maximum number of milliseconds how long records may be buffered - * before they are flushed. Set to 0 to disable. - * @param timerTickMs The number of milliseconds between each check if the - * timeout has been exceeded. Must be 100ms (as defined in - * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) - * or larger to avoid performance problems. + * @param timeoutMs The maximum number of milliseconds how long records may be buffered before + * they are flushed. Set to 0 to disable. + * @param timerTickMs The number of milliseconds between each check if the timeout has been + * exceeded. Must be 100ms (as defined in + * {@link #MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS}) or larger to avoid performance + * problems. 
*/ default void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) { throw new UnsupportedOperationException( - "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); + "The BufferedMutator::setWriteBufferPeriodicFlush has not been implemented"); } /** @@ -151,22 +149,22 @@ default void disableWriteBufferPeriodicFlush() { /** * Returns the current periodic flush timeout value in milliseconds. - * @return The maximum number of milliseconds how long records may be buffered before they - * are flushed. The value 0 means this is disabled. + * @return The maximum number of milliseconds how long records may be buffered before they are + * flushed. The value 0 means this is disabled. */ default long getWriteBufferPeriodicFlushTimeoutMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimeoutMs has not been implemented"); } /** * Returns the current periodic flush timertick interval in milliseconds. - * @return The number of milliseconds between each check if the timeout has been exceeded. - * This value only has a real meaning if the timeout has been set to > 0 + * @return The number of milliseconds between each check if the timeout has been exceeded. This + * value only has a real meaning if the timeout has been set to > 0 */ default long getWriteBufferPeriodicFlushTimerTickMs() { throw new UnsupportedOperationException( - "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); + "The BufferedMutator::getWriteBufferPeriodicFlushTimerTickMs has not been implemented"); } /** @@ -192,7 +190,7 @@ default long getWriteBufferPeriodicFlushTimerTickMs() { */ @InterfaceAudience.Public interface ExceptionListener { - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException; + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java index d3b18864f2a9..e868201538a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorImpl.java @@ -1,16 +1,18 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software distributed under the - * License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing permissions and + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.client; @@ -43,19 +45,17 @@ /** *

    - * Used to communicate with a single HBase table similar to {@link Table} - * but meant for batched, potentially asynchronous puts. Obtain an instance from - * a {@link Connection} and call {@link #close()} afterwards. Provide an alternate - * to this implementation by setting {@link BufferedMutatorParams#implementationClassName(String)} - * or by setting alternate classname via the key {} in Configuration. + * Used to communicate with a single HBase table similar to {@link Table} but meant for batched, + * potentially asynchronous puts. Obtain an instance from a {@link Connection} and call + * {@link #close()} afterwards. Provide an alternate to this implementation by setting + * {@link BufferedMutatorParams#implementationClassName(String)} or by setting alternate classname + * via the key {} in Configuration. *

    - * *

    - * While this can be used across threads, great care should be used when doing so. - * Errors are global to the buffered mutator and the Exceptions can be thrown on any - * thread that causes the flush for requests. + * While this can be used across threads, great care should be used when doing so. Errors are global + * to the buffered mutator and the Exceptions can be thrown on any thread that causes the flush for + * requests. *
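The alternate-implementation hook mentioned in the class comment can be exercised either per mutator through BufferedMutatorParams or through the classname key on the Configuration. The sketch below uses com.example.MyBufferedMutator as a purely hypothetical implementation class, and hedges on the Configuration route being honored by the Connection in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;

public final class AlternateMutatorSketch {
  // Per-mutator override: name the implementation class on the params object.
  static BufferedMutatorParams paramsWithCustomImpl(TableName table) {
    return new BufferedMutatorParams(table)
      .implementationClassName("com.example.MyBufferedMutator"); // hypothetical class
  }

  // Connection-wide override: set the classname key before creating the Connection.
  static Configuration confWithCustomImpl() {
    Configuration conf = HBaseConfiguration.create();
    conf.set(BufferedMutator.CLASSNAME_KEY, "com.example.MyBufferedMutator"); // hypothetical class
    return conf;
  }
}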

    - * * @see ConnectionFactory * @see Connection * @since 1.0.0 @@ -74,15 +74,15 @@ public class BufferedMutatorImpl implements BufferedMutator { private final ConcurrentLinkedQueue writeAsyncBuffer = new ConcurrentLinkedQueue<>(); private final AtomicLong currentWriteBufferSize = new AtomicLong(0); /** - * Count the size of {@link BufferedMutatorImpl#writeAsyncBuffer}. - * The {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation. + * Count the size of {@link BufferedMutatorImpl#writeAsyncBuffer}. The + * {@link ConcurrentLinkedQueue#size()} is NOT a constant-time operation. */ private final AtomicInteger undealtMutationCount = new AtomicInteger(0); private final long writeBufferSize; private final AtomicLong writeBufferPeriodicFlushTimeoutMs = new AtomicLong(0); private final AtomicLong writeBufferPeriodicFlushTimerTickMs = - new AtomicLong(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + new AtomicLong(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); private Timer writeBufferPeriodicFlushTimer = null; private final int maxKeyValueSize; @@ -108,41 +108,38 @@ public class BufferedMutatorImpl implements BufferedMutator { cleanupPoolOnClose = false; } ConnectionConfiguration tableConf = new ConnectionConfiguration(conf); - this.writeBufferSize = - params.getWriteBufferSize() != UNSET ? - params.getWriteBufferSize() : tableConf.getWriteBufferSize(); + this.writeBufferSize = params.getWriteBufferSize() != UNSET ? params.getWriteBufferSize() + : tableConf.getWriteBufferSize(); // Set via the setter because it does value validation and starts/stops the TimerTask long newWriteBufferPeriodicFlushTimeoutMs = - params.getWriteBufferPeriodicFlushTimeoutMs() != UNSET - ? params.getWriteBufferPeriodicFlushTimeoutMs() - : tableConf.getWriteBufferPeriodicFlushTimeoutMs(); + params.getWriteBufferPeriodicFlushTimeoutMs() != UNSET + ? params.getWriteBufferPeriodicFlushTimeoutMs() + : tableConf.getWriteBufferPeriodicFlushTimeoutMs(); long newWriteBufferPeriodicFlushTimerTickMs = - params.getWriteBufferPeriodicFlushTimerTickMs() != UNSET - ? params.getWriteBufferPeriodicFlushTimerTickMs() - : tableConf.getWriteBufferPeriodicFlushTimerTickMs(); - this.setWriteBufferPeriodicFlush( - newWriteBufferPeriodicFlushTimeoutMs, - newWriteBufferPeriodicFlushTimerTickMs); - - this.maxKeyValueSize = - params.getMaxKeyValueSize() != UNSET ? - params.getMaxKeyValueSize() : tableConf.getMaxKeyValueSize(); - - this.rpcTimeout = new AtomicInteger( - params.getRpcTimeout() != UNSET ? - params.getRpcTimeout() : conn.getConnectionConfiguration().getWriteRpcTimeout()); - - this.operationTimeout = new AtomicInteger( - params.getOperationTimeout() != UNSET ? - params.getOperationTimeout() : conn.getConnectionConfiguration().getOperationTimeout()); + params.getWriteBufferPeriodicFlushTimerTickMs() != UNSET + ? params.getWriteBufferPeriodicFlushTimerTickMs() + : tableConf.getWriteBufferPeriodicFlushTimerTickMs(); + this.setWriteBufferPeriodicFlush(newWriteBufferPeriodicFlushTimeoutMs, + newWriteBufferPeriodicFlushTimerTickMs); + + this.maxKeyValueSize = params.getMaxKeyValueSize() != UNSET ? params.getMaxKeyValueSize() + : tableConf.getMaxKeyValueSize(); + + this.rpcTimeout = new AtomicInteger(params.getRpcTimeout() != UNSET ? params.getRpcTimeout() + : conn.getConnectionConfiguration().getWriteRpcTimeout()); + + this.operationTimeout = + new AtomicInteger(params.getOperationTimeout() != UNSET ? 
params.getOperationTimeout() + : conn.getConnectionConfiguration().getOperationTimeout()); this.ap = ap; } + BufferedMutatorImpl(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory, RpcControllerFactory rpcFactory, BufferedMutatorParams params) { this(conn, params, - // puts need to track errors globally due to how the APIs currently work. - new AsyncProcess(conn, conn.getConfiguration(), rpcCallerFactory, rpcFactory)); + // puts need to track errors globally due to how the APIs currently work. + new AsyncProcess(conn, conn.getConfiguration(), rpcCallerFactory, rpcFactory)); } private void checkClose() { @@ -170,14 +167,14 @@ public Configuration getConfiguration() { } @Override - public void mutate(Mutation m) throws InterruptedIOException, - RetriesExhaustedWithDetailsException { + public void mutate(Mutation m) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { mutate(Collections.singletonList(m)); } @Override - public void mutate(List ms) throws InterruptedIOException, - RetriesExhaustedWithDetailsException { + public void mutate(List ms) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { checkClose(); long toAddSize = 0; @@ -251,12 +248,9 @@ public synchronized void close() throws IOException { } private AsyncProcessTask createTask(QueueRowAccess access) { - return new AsyncProcessTask(AsyncProcessTask.newBuilder() - .setPool(pool) - .setTableName(tableName) - .setRowAccess(access) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE) - .build()) { + return new AsyncProcessTask( + AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName).setRowAccess(access) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.AT_LEAST_ONE).build()) { @Override public int getRpcTimeout() { return rpcTimeout.get(); @@ -277,12 +271,11 @@ public void flush() throws InterruptedIOException, RetriesExhaustedWithDetailsEx /** * Send the operations in the buffer to the servers. - * * @param flushAll - if true, sends all the writes and wait for all of them to finish before - * returning. Otherwise, flush until buffer size is smaller than threshold + * returning. Otherwise, flush until buffer size is smaller than threshold */ - private void doFlush(boolean flushAll) throws InterruptedIOException, - RetriesExhaustedWithDetailsException { + private void doFlush(boolean flushAll) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { List errors = new ArrayList<>(); while (true) { if (!flushAll && currentWriteBufferSize.get() <= writeBufferSize) { @@ -308,15 +301,15 @@ private void doFlush(boolean flushAll) throws InterruptedIOException, RetriesExhaustedWithDetailsException exception = makeException(errors); if (exception == null) { return; - } else if(listener == null) { + } else if (listener == null) { throw exception; } else { listener.onException(exception, this); } } - private static RetriesExhaustedWithDetailsException makeException( - List errors) { + private static RetriesExhaustedWithDetailsException + makeException(List errors) { switch (errors.size()) { case 0: return null; @@ -345,17 +338,17 @@ public long getWriteBufferSize() { @Override public synchronized void setWriteBufferPeriodicFlush(long timeoutMs, long timerTickMs) { - long originalTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs.get(); + long originalTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs.get(); long originalTimerTickMs = this.writeBufferPeriodicFlushTimerTickMs.get(); // Both parameters have minimal values. 
writeBufferPeriodicFlushTimeoutMs.set(Math.max(0, timeoutMs)); - writeBufferPeriodicFlushTimerTickMs.set( - Math.max(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, timerTickMs)); + writeBufferPeriodicFlushTimerTickMs + .set(Math.max(MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, timerTickMs)); // If something changed we stop the old Timer. - if (writeBufferPeriodicFlushTimeoutMs.get() != originalTimeoutMs || - writeBufferPeriodicFlushTimerTickMs.get() != originalTimerTickMs) { + if (writeBufferPeriodicFlushTimeoutMs.get() != originalTimeoutMs + || writeBufferPeriodicFlushTimerTickMs.get() != originalTimerTickMs) { if (writeBufferPeriodicFlushTimer != null) { writeBufferPeriodicFlushTimer.cancel(); writeBufferPeriodicFlushTimer = null; @@ -363,16 +356,14 @@ public synchronized void setWriteBufferPeriodicFlush(long timeoutMs, long timerT } // If we have the need for a timer and there is none we start it - if (writeBufferPeriodicFlushTimer == null && - writeBufferPeriodicFlushTimeoutMs.get() > 0) { + if (writeBufferPeriodicFlushTimer == null && writeBufferPeriodicFlushTimeoutMs.get() > 0) { writeBufferPeriodicFlushTimer = new Timer(true); // Create Timer running as Daemon. writeBufferPeriodicFlushTimer.schedule(new TimerTask() { @Override public void run() { BufferedMutatorImpl.this.timerCallbackForWriteBufferPeriodicFlush(); } - }, writeBufferPeriodicFlushTimerTickMs.get(), - writeBufferPeriodicFlushTimerTickMs.get()); + }, writeBufferPeriodicFlushTimerTickMs.get(), writeBufferPeriodicFlushTimerTickMs.get()); } } @@ -446,10 +437,12 @@ public void close() { public Iterator iterator() { return new Iterator() { private int countDown = remainder; + @Override public boolean hasNext() { return countDown > 0; } + @Override public Row next() { restoreLastMutation(); @@ -464,6 +457,7 @@ public Row next() { --countDown; return last; } + @Override public void remove() { if (last == null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java index 43495b3fab75..9a8040f7dfff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.concurrent.ExecutorService; @@ -43,8 +41,7 @@ public class BufferedMutatorParams implements Cloneable { private BufferedMutator.ExceptionListener listener = new BufferedMutator.ExceptionListener() { @Override public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator bufferedMutator) - throws RetriesExhaustedWithDetailsException { + BufferedMutator bufferedMutator) throws RetriesExhaustedWithDetailsException { throw exception; } }; @@ -141,8 +138,8 @@ public ExecutorService getPool() { } /** - * Override the default executor pool defined by the {@code hbase.htable.threads.*} - * configuration values. + * Override the default executor pool defined by the {@code hbase.htable.threads.*} configuration + * values. 
*/ public BufferedMutatorParams pool(ExecutorService pool) { this.pool = pool; @@ -150,8 +147,8 @@ public BufferedMutatorParams pool(ExecutorService pool) { } /** - * @return Name of the class we will use when we construct a - * {@link BufferedMutator} instance or null if default implementation. + * @return Name of the class we will use when we construct a {@link BufferedMutator} instance or + * null if default implementation. */ public String getImplementationClassName() { return this.implementationClassName; @@ -180,21 +177,20 @@ public BufferedMutatorParams listener(BufferedMutator.ExceptionListener listener /* * (non-Javadoc) - * * @see java.lang.Object#clone() */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL", - justification="The clone below is complete") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "CN_IDIOM_NO_SUPER_CALL", + justification = "The clone below is complete") @Override public BufferedMutatorParams clone() { BufferedMutatorParams clone = new BufferedMutatorParams(this.tableName); - clone.writeBufferSize = this.writeBufferSize; - clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; + clone.writeBufferSize = this.writeBufferSize; + clone.writeBufferPeriodicFlushTimeoutMs = this.writeBufferPeriodicFlushTimeoutMs; clone.writeBufferPeriodicFlushTimerTickMs = this.writeBufferPeriodicFlushTimerTickMs; - clone.maxKeyValueSize = this.maxKeyValueSize; - clone.pool = this.pool; - clone.listener = this.listener; - clone.implementationClassName = this.implementationClassName; + clone.maxKeyValueSize = this.maxKeyValueSize; + clone.pool = this.pool; + clone.listener = this.listener; + clone.implementationClassName = this.implementationClassName; return clone; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java index 5095c96ab327..d766872defb1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cancellable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,15 +16,16 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; /** - * This should be implemented by the Get/Scan implementations that - * talk to replica regions. When an RPC response is received from one - * of the replicas, the RPCs to the other replicas are cancelled. + * This should be implemented by the Get/Scan implementations that talk to replica regions. When an + * RPC response is received from one of the replicas, the RPCs to the other replicas are cancelled. 
*/ @InterfaceAudience.Private interface Cancellable { public void cancel(); + public boolean isCancelled(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java index 6ad9254e35e8..84d1ba15b5f4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CancellableRegionServerCallable.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,27 +19,28 @@ import java.io.IOException; import java.io.InterruptedIOException; - import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; + /** - * This class is used to unify HTable calls with AsyncProcess Framework. HTable can use - * AsyncProcess directly though this class. Also adds global timeout tracking on top of - * RegionServerCallable and implements Cancellable. - * Global timeout tracking conflicts with logic in RpcRetryingCallerImpl's callWithRetries. So you - * can only use this callable in AsyncProcess which only uses callWithoutRetries and retries in its - * own implementation. + * This class is used to unify HTable calls with AsyncProcess Framework. HTable can use AsyncProcess + * directly though this class. Also adds global timeout tracking on top of RegionServerCallable and + * implements Cancellable. Global timeout tracking conflicts with logic in RpcRetryingCallerImpl's + * callWithRetries. So you can only use this callable in AsyncProcess which only uses + * callWithoutRetries and retries in its own implementation. 
*/ @InterfaceAudience.Private -abstract class CancellableRegionServerCallable extends ClientServiceCallable implements - Cancellable { +abstract class CancellableRegionServerCallable extends ClientServiceCallable + implements Cancellable { private final RetryingTimeTracker tracker; private final int rpcTimeout; + CancellableRegionServerCallable(Connection connection, TableName tableName, byte[] row, RpcController rpcController, int rpcTimeout, RetryingTimeTracker tracker, int priority) { super(connection, tableName, row, rpcController, priority); @@ -46,8 +48,8 @@ abstract class CancellableRegionServerCallable extends ClientServiceCallable< this.tracker = tracker; } - /* Override so can mess with the callTimeout. - * (non-Javadoc) + /* + * Override so can mess with the callTimeout. (non-Javadoc) * @see org.apache.hadoop.hbase.client.RegionServerCallable#rpcCall(int) */ @Override @@ -92,30 +94,30 @@ public boolean isCancelled() { } protected ClientProtos.MultiResponse doMulti(ClientProtos.MultiRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().multi(getRpcController(), request); } protected ClientProtos.ScanResponse doScan(ClientProtos.ScanRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().scan(getRpcController(), request); } - protected ClientProtos.PrepareBulkLoadResponse doPrepareBulkLoad( - ClientProtos.PrepareBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + protected ClientProtos.PrepareBulkLoadResponse + doPrepareBulkLoad(ClientProtos.PrepareBulkLoadRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().prepareBulkLoad(getRpcController(), request); } - protected ClientProtos.BulkLoadHFileResponse doBulkLoadHFile( - ClientProtos.BulkLoadHFileRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + protected ClientProtos.BulkLoadHFileResponse + doBulkLoadHFile(ClientProtos.BulkLoadHFileRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().bulkLoadHFile(getRpcController(), request); } - protected ClientProtos.CleanupBulkLoadResponse doCleanupBulkLoad( - ClientProtos.CleanupBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { + protected ClientProtos.CleanupBulkLoadResponse + doCleanupBulkLoad(ClientProtos.CleanupBulkLoadRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { return getStub().cleanupBulkLoad(getRpcController(), request); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java index 27be88a9def2..7c4896af2e74 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelector.java @@ -39,10 +39,9 @@ interface CatalogReplicaLoadBalanceSelector { /** * Select a catalog replica region where client go to loop up the input row key. 
- * * @param tablename table name - * @param row key to look up - * @param locateType locate type + * @param row key to look up + * @param locateType locate type * @return replica id */ int select(TableName tablename, byte[] row, RegionLocateType locateType); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java index 485afb40d261..f0ef0caf8aec 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSelectorFactory.java @@ -36,13 +36,13 @@ private CatalogReplicaLoadBalanceSelectorFactory() { /** * Create a CatalogReplicaLoadBalanceReplicaSelector based on input config. - * @param replicaSelectorClass Selector classname. - * @param tableName System table name. + * @param replicaSelectorClass Selector classname. + * @param tableName System table name. * @param choreService {@link ChoreService} - * @return {@link CatalogReplicaLoadBalanceSelector} + * @return {@link CatalogReplicaLoadBalanceSelector} */ public static CatalogReplicaLoadBalanceSelector createSelector(String replicaSelectorClass, - TableName tableName, ChoreService choreService, IntSupplier getReplicaCount) { + TableName tableName, ChoreService choreService, IntSupplier getReplicaCount) { return ReflectionUtils.instantiateWithCustomCtor(replicaSelectorClass, new Class[] { TableName.class, ChoreService.class, IntSupplier.class }, new Object[] { tableName, choreService, getReplicaCount }); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java index f78dfb199c11..7320563cd29b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaLoadBalanceSimpleSelector.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hbase.client.ConnectionUtils.isEmptyStopRow; import static org.apache.hadoop.hbase.util.Bytes.BYTES_COMPARATOR; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; + import java.util.Iterator; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -39,35 +40,36 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - *

    CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load - * balancing algorithm. It maintains a stale location cache for each table. Whenever client looks - * up location, it first check if the row is the stale location cache. If yes, the location from - * catalog replica is stale, it will go to the primary region to look up update-to-date location; - * otherwise, it will randomly pick up a replica region for lookup. When clients receive - * RegionNotServedException from region servers, it will add these region locations to the stale - * location cache. The stale cache will be cleaned up periodically by a chore.

    - * + *

    + * CatalogReplicaLoadBalanceReplicaSimpleSelector implements a simple catalog replica load balancing + * algorithm. It maintains a stale location cache for each table. Whenever client looks up location, + * it first check if the row is the stale location cache. If yes, the location from catalog replica + * is stale, it will go to the primary region to look up update-to-date location; otherwise, it will + * randomly pick up a replica region for lookup. When clients receive RegionNotServedException from + * region servers, it will add these region locations to the stale location cache. The stale cache + * will be cleaned up periodically by a chore. + *

    * It follows a simple algorithm to choose a replica to go: - * *
      - *
    1. If there is no stale location entry for rows it looks up, it will randomly - * pick a replica region to do lookup.
    2. - *
    3. If the location from the replica region is stale, client gets RegionNotServedException - * from region server, in this case, it will create StaleLocationCacheEntry in - * CatalogReplicaLoadBalanceReplicaSimpleSelector.
    4. - *
    5. When client tries to do location lookup, it checks StaleLocationCache first for rows it - * tries to lookup, if entry exists, it will go with primary meta region to do lookup; - * otherwise, it will follow step 1.
    6. - *
    7. A chore will periodically run to clean up cache entries in the StaleLocationCache.
    8. + *
    9. If there is no stale location entry for rows it looks up, it will randomly pick a replica + * region to do lookup.
    10. + *
    11. If the location from the replica region is stale, client gets RegionNotServedException from + * region server, in this case, it will create StaleLocationCacheEntry in + * CatalogReplicaLoadBalanceReplicaSimpleSelector.
    12. + *
    13. When client tries to do location lookup, it checks StaleLocationCache first for rows it tries + * to lookup, if entry exists, it will go with primary meta region to do lookup; otherwise, it will + * follow step 1.
    14. + *
    15. A chore will periodically run to clean up cache entries in the StaleLocationCache.
    16. *
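To make the numbered algorithm above easier to follow, here is a deliberately simplified, self-contained sketch of the selection rule: a recent stale-cache hit routes the lookup to the primary replica, anything else picks a random replica. It illustrates the idea only and is not the implementation in this class (for one thing, it ignores region end keys).

import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.hadoop.hbase.util.Bytes;

final class ReplicaSelectionSketch {
  private static final int PRIMARY_REPLICA_ID = 0;
  private static final long STALE_TIMEOUT_MS = 3000; // mirrors the 3 second stale-cache timeout

  // start key of a region reported stale -> time the staleness was recorded
  private final ConcurrentSkipListMap<byte[], Long> staleCache =
    new ConcurrentSkipListMap<>(Bytes.BYTES_COMPARATOR);
  private final int numReplicas;

  ReplicaSelectionSketch(int numReplicas) {
    this.numReplicas = numReplicas;
  }

  void onStaleLocation(byte[] regionStartKey) {
    staleCache.putIfAbsent(regionStartKey, System.currentTimeMillis());
  }

  int select(byte[] row) {
    Map.Entry<byte[], Long> entry = staleCache.floorEntry(row);
    if (entry == null) {
      return randomReplica(); // no stale entry for this row, any replica will do
    }
    if (System.currentTimeMillis() - entry.getValue() >= STALE_TIMEOUT_MS) {
      staleCache.remove(entry.getKey()); // entry aged out, trust the replicas again
      return randomReplica();
    }
    // A recent stale report covers this row: go to the primary for an up-to-date location.
    return PRIMARY_REPLICA_ID;
  }

  private int randomReplica() {
    return numReplicas <= 1 ? PRIMARY_REPLICA_ID : ThreadLocalRandom.current().nextInt(numReplicas);
  }
}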
    */ -class CatalogReplicaLoadBalanceSimpleSelector implements - CatalogReplicaLoadBalanceSelector, Stoppable { +class CatalogReplicaLoadBalanceSimpleSelector + implements CatalogReplicaLoadBalanceSelector, Stoppable { private static final Logger LOG = - LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); + LoggerFactory.getLogger(CatalogReplicaLoadBalanceSimpleSelector.class); private final long STALE_CACHE_TIMEOUT_IN_MILLISECONDS = 3000; // 3 seconds private final int STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS = 1500; // 1.5 seconds private final int REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS = 60000; // 1 minute @@ -96,15 +98,13 @@ public long getTimestamp() { @Override public String toString() { - return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) - .append("endKey", endKey) - .append("timestamp", timestamp) - .toString(); + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE).append("endKey", endKey) + .append("timestamp", timestamp).toString(); } } - private final ConcurrentMap> - staleCache = new ConcurrentHashMap<>(); + private final ConcurrentMap> staleCache = + new ConcurrentHashMap<>(); private volatile int numOfReplicas; private final ChoreService choreService; private final TableName tableName; @@ -112,7 +112,7 @@ public String toString() { private volatile boolean isStopped = false; CatalogReplicaLoadBalanceSimpleSelector(TableName tableName, ChoreService choreService, - IntSupplier getNumOfReplicas) { + IntSupplier getNumOfReplicas) { this.choreService = choreService; this.tableName = tableName; this.getNumOfReplicas = getNumOfReplicas; @@ -125,24 +125,22 @@ public String toString() { } /** - * When a client runs into RegionNotServingException, it will call this method to - * update Selector's internal state. + * When a client runs into RegionNotServingException, it will call this method to update + * Selector's internal state. * @param loc the location which causes exception. */ public void onError(HRegionLocation loc) { - ConcurrentNavigableMap tableCache = - computeIfAbsent(staleCache, loc.getRegion().getTable(), - () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); + ConcurrentNavigableMap tableCache = computeIfAbsent(staleCache, + loc.getRegion().getTable(), () -> new ConcurrentSkipListMap<>(BYTES_COMPARATOR)); byte[] startKey = loc.getRegion().getStartKey(); - tableCache.putIfAbsent(startKey, - new StaleLocationCacheEntry(loc.getRegion().getEndKey())); + tableCache.putIfAbsent(startKey, new StaleLocationCacheEntry(loc.getRegion().getEndKey())); LOG.debug("Add entry to stale cache for table {} with startKey {}, {}", loc.getRegion().getTable(), startKey, loc.getRegion().getEndKey()); } /** - * Select an random replica id. In case there is no replica region configured, return - * the primary replica id. + * Select an random replica id. In case there is no replica region configured, return the primary + * replica id. * @return Replica id */ private int getRandomReplicaId() { @@ -159,20 +157,18 @@ private int getRandomReplicaId() { } /** - * When it looks up a location, it will call this method to find a replica region to go. - * For a normal case, > 99% of region locations from catalog/meta replica will be up to date. - * In extreme cases such as region server crashes, it will depends on how fast replication - * catches up. - * + * When it looks up a location, it will call this method to find a replica region to go. 
For a + * normal case, > 99% of region locations from catalog/meta replica will be up to date. In extreme + * cases such as region server crashes, it will depends on how fast replication catches up. * @param tablename table name it looks up * @param row key it looks up. * @param locateType locateType, Only BEFORE and CURRENT will be passed in. * @return catalog replica id */ public int select(final TableName tablename, final byte[] row, - final RegionLocateType locateType) { - Preconditions.checkArgument(locateType == RegionLocateType.BEFORE || - locateType == RegionLocateType.CURRENT, + final RegionLocateType locateType) { + Preconditions.checkArgument( + locateType == RegionLocateType.BEFORE || locateType == RegionLocateType.CURRENT, "Expected type BEFORE or CURRENT but got: %s", locateType); ConcurrentNavigableMap tableCache = staleCache.get(tablename); @@ -200,15 +196,15 @@ public int select(final TableName tablename, final byte[] row, // long comparing is faster than comparing byte arrays(in most cases). It could remove // stale entries faster. If the possible match entry does not time out, it will check if // the entry is a match for the row passed in and select the replica id accordingly. - if ((EnvironmentEdgeManager.currentTime() - entry.getValue().getTimestamp()) >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if ((EnvironmentEdgeManager.currentTime() + - entry.getValue().getTimestamp()) >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { LOG.debug("Entry for table {} with startKey {}, {} times out", tablename, entry.getKey(), entry); tableCache.remove(entry.getKey()); return getRandomReplicaId(); } - byte[] endKey = entry.getValue().getEndKey(); + byte[] endKey = entry.getValue().getEndKey(); // The following logic is borrowed from AsyncNonMetaRegionLocator. 
if (isEmptyStopRow(endKey)) { @@ -247,12 +243,11 @@ public boolean isStopped() { private void cleanupReplicaReplicaStaleCache() { long curTimeInMills = EnvironmentEdgeManager.currentTime(); for (ConcurrentNavigableMap tableCache : staleCache.values()) { - Iterator> it = - tableCache.entrySet().iterator(); + Iterator> it = tableCache.entrySet().iterator(); while (it.hasNext()) { Map.Entry entry = it.next(); - if (curTimeInMills - entry.getValue().getTimestamp() >= - STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { + if (curTimeInMills + - entry.getValue().getTimestamp() >= STALE_CACHE_TIMEOUT_IN_MILLISECONDS) { LOG.debug("clean entry {}, {} from stale cache", entry.getKey(), entry.getValue()); it.remove(); } @@ -271,17 +266,17 @@ private int refreshCatalogReplicaCount() { } int cachedNumOfReplicas = this.numOfReplicas; - if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) || - (cachedNumOfReplicas != newNumOfReplicas)) { + if ((cachedNumOfReplicas == UNINITIALIZED_NUM_OF_REPLICAS) + || (cachedNumOfReplicas != newNumOfReplicas)) { this.numOfReplicas = newNumOfReplicas; } return newNumOfReplicas; } - private ScheduledChore getCacheCleanupChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getCacheCleanupChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("CleanupCatalogReplicaStaleCache", this, - STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { + STALE_CACHE_CLEAN_CHORE_INTERVAL_IN_MILLISECONDS) { @Override protected void chore() { selector.cleanupReplicaReplicaStaleCache(); @@ -289,10 +284,10 @@ protected void chore() { }; } - private ScheduledChore getRefreshReplicaCountChore( - final CatalogReplicaLoadBalanceSimpleSelector selector) { + private ScheduledChore + getRefreshReplicaCountChore(final CatalogReplicaLoadBalanceSimpleSelector selector) { return new ScheduledChore("RefreshReplicaCountChore", this, - REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { + REFRESH_REPLICA_COUNT_CHORE_INTERVAL_IN_MILLISECONDS) { @Override protected void chore() { selector.refreshCatalogReplicaCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java index 40062e32e83c..647d5dcf38f5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CatalogReplicaMode.java @@ -6,7 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * http://www.apache.org/licenses/LICENSE-2.0 + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,18 +20,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - *

-   There are two modes with catalog replica support.
- *
+ * <pre>
+ * There are two modes with catalog replica support.
+ *
  * <ul>
- * <li>HEDGED_READ  - Client sends requests to the primary region first, within a
- *                    configured amount of time, if there is no response coming back,
- *                    client sends requests to all replica regions and takes the first
- *                    response.</li>
- *
- * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode,
- *                    if results from replica regions are stale, next time, client sends requests for
- *                    these stale locations to the primary region. In this mode, scan
- *                    requests are load balanced across all replica regions.</li>
+ * <li>HEDGED_READ - Client sends requests to the primary region first, within a configured amount
+ * of time, if there is no response coming back, client sends requests to all replica regions and
+ * takes the first response.</li>
+ *
+ * <li>LOAD_BALANCE - Client sends requests to replica regions in a round-robin mode, if results
+ * from replica regions are stale, next time, client sends requests for these stale locations to the
+ * primary region. In this mode, scan requests are load balanced across all replica regions.</li>
  * </ul>
  * </pre>
  */
 @InterfaceAudience.Private
@@ -54,7 +54,7 @@ public String toString() {
   };
 
   public static CatalogReplicaMode fromString(final String value) {
-    for(CatalogReplicaMode mode : values()) {
+    for (CatalogReplicaMode mode : values()) {
       if (mode.toString().equalsIgnoreCase(value)) {
         return mode;
       }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
index 47bbc53797bf..0dcbbb86eafe 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CheckAndMutate.java
@@ -17,21 +17,23 @@
  */
 package org.apache.hadoop.hbase.client;
 
+import java.util.Arrays;
+import java.util.Objects;
 import org.apache.hadoop.hbase.CompareOperator;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.io.TimeRange;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
+
 import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
-import java.util.Arrays;
-import java.util.Objects;
 
 /**
  * Used to perform CheckAndMutate operations.
  * <p>
- * Use the builder class to instantiate a CheckAndMutate object.
- * This builder class is fluent style APIs, the code are like:
+ * Use the builder class to instantiate a CheckAndMutate object. This builder class is fluent style
+ * APIs, the code are like:
+ *
  * <pre>
      * // A CheckAndMutate operation where do the specified action if the column (specified by the
    @@ -77,7 +79,6 @@ private Builder(byte[] row) {
     
         /**
          * Check for lack of column
    -     *
          * @param family family to check
          * @param qualifier qualifier to check
          * @return the CheckAndMutate object
    @@ -88,7 +89,6 @@ public Builder ifNotExists(byte[] family, byte[] qualifier) {
     
         /**
          * Check for equality
    -     *
          * @param family family to check
          * @param qualifier qualifier to check
          * @param value the expected value
    @@ -106,7 +106,7 @@ public Builder ifEquals(byte[] family, byte[] qualifier, byte[] value) {
          * @return the CheckAndMutate object
          */
         public Builder ifMatches(byte[] family, byte[] qualifier, CompareOperator compareOp,
    -      byte[] value) {
    +        byte[] value) {
           this.family = Preconditions.checkNotNull(family, "family is null");
           this.qualifier = qualifier;
           this.op = Preconditions.checkNotNull(compareOp, "compareOp is null");
    @@ -135,13 +135,14 @@ public Builder timeRange(TimeRange timeRange) {
         private void preCheck(Row action) {
           Preconditions.checkNotNull(action, "action is null");
           if (!Bytes.equals(row, action.getRow())) {
    -        throw new IllegalArgumentException("The row of the action <" +
    -          Bytes.toStringBinary(action.getRow()) + "> doesn't match the original one <" +
    -          Bytes.toStringBinary(this.row) + ">");
    +        throw new IllegalArgumentException(
    +            "The row of the action <" + Bytes.toStringBinary(action.getRow())
    +                + "> doesn't match the original one <" + Bytes.toStringBinary(this.row) + ">");
           }
    -      Preconditions.checkState(op != null || filter != null, "condition is null. You need to"
    -        + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    -        + " CheckAndMutate object");
    +      Preconditions.checkState(op != null || filter != null,
    +        "condition is null. You need to"
    +            + " specify the condition by calling ifNotExists/ifEquals/ifMatches before building a"
    +            + " CheckAndMutate object");
         }
     
         /**
    @@ -212,7 +213,6 @@ public CheckAndMutate build(RowMutations mutations) {
     
       /**
        * returns a builder object to build a CheckAndMutate object
    -   *
        * @param row row
        * @return a builder object
        */
    @@ -229,8 +229,8 @@ public static Builder newBuilder(byte[] row) {
       private final TimeRange timeRange;
       private final Row action;
     
    -  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier,final CompareOperator op,
    -    byte[] value, TimeRange timeRange, Row action) {
    +  private CheckAndMutate(byte[] row, byte[] family, byte[] qualifier, final CompareOperator op,
    +      byte[] value, TimeRange timeRange, Row action) {
         this.row = row;
         this.family = family;
         this.qualifier = qualifier;
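The hunks above only touch formatting, but the builder they reformat is the public fluent API for conditional mutations. A minimal usage sketch of that style, assuming an existing Connection and a table "t1" with family "cf" (all names and values here are placeholders, not part of the patch):

// Illustrative sketch only: fluent CheckAndMutate usage against the Table API.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.CheckAndMutate;
import org.apache.hadoop.hbase.client.CheckAndMutateResult;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  static void example(Connection conn) throws Exception {
    byte[] row = Bytes.toBytes("row-1");
    // Only apply the Put if cf:q currently holds the expected value.
    CheckAndMutate cam = CheckAndMutate.newBuilder(row)
      .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("expected"))
      .build(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
        Bytes.toBytes("new-value")));
    try (Table table = conn.getTable(TableName.valueOf("t1"))) {
      CheckAndMutateResult result = table.checkAndMutate(cam);
      System.out.println("mutation applied: " + result.isSuccess());
    }
  }
}

The same builder also accepts a Delete or a RowMutations in build(...), as the hunk around build(RowMutations mutations) above shows.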
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
    index 0c832acdb37a..f647a5880425 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientAsyncPrefetchScanner.java
    @@ -18,6 +18,7 @@
     package org.apache.hadoop.hbase.client;
     
     import static org.apache.hadoop.hbase.client.ConnectionUtils.calcEstimatedSize;
    +
     import java.io.IOException;
     import java.io.InterruptedIOException;
     import java.util.Queue;
    @@ -36,14 +37,13 @@
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * ClientAsyncPrefetchScanner implements async scanner behaviour.
    - * Specifically, the cache used by this scanner is a concurrent queue which allows both
    - * the producer (hbase client) and consumer (application) to access the queue in parallel.
    - * The number of rows returned in a prefetch is defined by the caching factor and the result size
    - * factor.
    - * This class allocates a buffer cache, whose size is a function of both factors.
- * The prefetch is invoked when the cache is half-filled, instead of waiting for it to be empty.
    - * This is defined in the method {@link ClientAsyncPrefetchScanner#prefetchCondition()}.
    + * ClientAsyncPrefetchScanner implements async scanner behaviour. Specifically, the cache used by
    + * this scanner is a concurrent queue which allows both the producer (hbase client) and consumer
    + * (application) to access the queue in parallel. The number of rows returned in a prefetch is
    + * defined by the caching factor and the result size factor. This class allocates a buffer cache,
+ * whose size is a function of both factors. The prefetch is invoked when the cache is half-filled,
    + * instead of waiting for it to be empty. This is defined in the method
    + * {@link ClientAsyncPrefetchScanner#prefetchCondition()}.
      */
     @InterfaceAudience.Private
     public class ClientAsyncPrefetchScanner extends ClientSimpleScanner {
    @@ -132,8 +132,8 @@ protected void addEstimatedSize(long estimatedSize) {
       }
     
       private void handleException() throws IOException {
    -    //The prefetch task running in the background puts any exception it
    -    //catches into this exception queue.
    +    // The prefetch task running in the background puts any exception it
    +    // catches into this exception queue.
         // Rethrow the exception so the application can handle it.
         while (!exceptionsQueue.isEmpty()) {
           Exception first = exceptionsQueue.peek();
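The class comment above explains the producer/consumer cache and the "prefetch when half consumed" trigger. A minimal sketch of that idea follows; it is not the actual ClientAsyncPrefetchScanner code, and the class, field names and capacity handling are simplified assumptions:

// Sketch: a concurrent cache shared by a background producer and the reading application,
// with a prefetch trigger that fires once the cache drops below half of its capacity.
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicLong;

class PrefetchCacheSketch<T> {
  private final Queue<T> cache = new ConcurrentLinkedQueue<>(); // accessed by both sides in parallel
  private final AtomicLong cachedSize = new AtomicLong();       // estimated bytes currently cached
  private final long maxCacheSize;                              // e.g. caching factor * result size factor

  PrefetchCacheSketch(long maxCacheSize) {
    this.maxCacheSize = maxCacheSize;
  }

  void add(T result, long estimatedSize) {
    cache.add(result);
    cachedSize.addAndGet(estimatedSize);
  }

  T poll(long estimatedSize) {
    T r = cache.poll();
    if (r != null) {
      cachedSize.addAndGet(-estimatedSize);
    }
    return r;
  }

  // Trigger the background fetch before the cache is completely empty.
  boolean prefetchCondition() {
    return cachedSize.get() < maxCacheSize / 2;
  }
}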
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    index 11b2f7883a01..36722c2894d5 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientCoprocessorRpcController.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,7 +19,6 @@
     
     import com.google.protobuf.RpcCallback;
     import com.google.protobuf.RpcController;
    -
     import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    index 9125132e66c5..758cf508578a 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -20,27 +19,27 @@
     
     import java.io.IOException;
     import java.lang.management.ManagementFactory;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
     import org.apache.hadoop.hbase.util.Addressing;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
    +import org.apache.yetus.audience.InterfaceAudience;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
     
     /**
    - * The class that is able to determine some unique strings for the client,
    - * such as an IP address, PID, and composite deterministic ID.
    + * The class that is able to determine some unique strings for the client, such as an IP address,
    + * PID, and composite deterministic ID.
      */
     @InterfaceAudience.Private
     final class ClientIdGenerator {
       private static final Logger LOG = LoggerFactory.getLogger(ClientIdGenerator.class);
     
    -  private ClientIdGenerator() {}
    +  private ClientIdGenerator() {
    +  }
     
       /**
    -   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill...
    -   * Note though that new UUID in java by default is just a random number.
    +   * @return a unique ID incorporating IP address, PID, TID and timer. Might be an overkill... Note
    +   *         though that new UUID in java by default is just a random number.
        */
       public static byte[] generateClientId() {
         byte[] selfBytes = getIpAddressBytes();
    @@ -78,8 +77,8 @@ public static Long getPid() {
       }
     
       /**
    -   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual
    -   *         and not a loopback address. Empty array if none can be found or error occurred.
    +   * @return Some IPv4/IPv6 address available on the current machine that is up, not virtual and not
    +   *         a loopback address. Empty array if none can be found or error occurred.
        */
       public static byte[] getIpAddressBytes() {
         try {
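The javadoc above lists the ingredients of the generated client id: an IP address, the PID, the TID and a timer. A simplified sketch of assembling such an id (this is not the real ClientIdGenerator; it skips the HBase utility classes, logging and error handling):

// Sketch: compose a client id from local IP + PID + thread id + current time.
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.nio.ByteBuffer;

final class ClientIdSketch {
  static byte[] generate() throws Exception {
    byte[] ip = InetAddress.getLocalHost().getAddress();             // some local address
    String jvmName = ManagementFactory.getRuntimeMXBean().getName(); // typically "pid@hostname"
    long pid = Long.parseLong(jvmName.split("@")[0]);
    long tid = Thread.currentThread().getId();
    long now = System.currentTimeMillis();
    ByteBuffer buf = ByteBuffer.allocate(ip.length + 3 * Long.BYTES);
    buf.put(ip).putLong(pid).putLong(tid).putLong(now);
    return buf.array();
  }
}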
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
    index ac5a7110a06e..4913b23f5bd9 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientScanner.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -40,13 +40,14 @@
     import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
     import org.apache.hadoop.hbase.regionserver.LeaseException;
     import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
    -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
     import org.apache.yetus.audience.InterfaceAudience;
     import org.slf4j.Logger;
     import org.slf4j.LoggerFactory;
     
    +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
    +
     /**
      * Implements the scanner interface for the HBase client. If there are multiple regions in a table,
      * this scanner will iterate through them all.
    @@ -117,7 +118,7 @@ public ClientScanner(final Configuration conf, final Scan scan, final TableName
             HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);
         }
         this.scannerTimeout = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
    -        HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
    +      HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
     
         // check if application wants to collect scan metrics
         initScanMetrics(scan);
    @@ -140,8 +141,8 @@ public ClientScanner(final Configuration conf, final Scan scan, final TableName
       }
     
       protected final int getScanReplicaId() {
    -    return scan.getReplicaId() >= RegionReplicaUtil.DEFAULT_REPLICA_ID ? scan.getReplicaId() :
    -      RegionReplicaUtil.DEFAULT_REPLICA_ID;
    +    return scan.getReplicaId() >= RegionReplicaUtil.DEFAULT_REPLICA_ID ? scan.getReplicaId()
    +        : RegionReplicaUtil.DEFAULT_REPLICA_ID;
       }
     
       protected ClusterConnection getConnection() {
    @@ -238,8 +239,8 @@ protected boolean moveToNextRegion() {
         if (LOG.isDebugEnabled() && this.currentRegion != null) {
           // Only worth logging if NOT first region in scan.
           LOG.debug(
    -        "Advancing internal scanner to startKey at '" + Bytes.toStringBinary(scan.getStartRow()) +
    -            "', " + (scan.includeStartRow() ? "inclusive" : "exclusive"));
    +        "Advancing internal scanner to startKey at '" + Bytes.toStringBinary(scan.getStartRow())
    +            + "', " + (scan.includeStartRow() ? "inclusive" : "exclusive"));
         }
         // clear the current region, we will set a new value to it after the first call of the new
         // callable.
    @@ -331,8 +332,8 @@ private boolean regionExhausted(Result[] values) {
         // old time we always return empty result for a open scanner operation so we add a check here to
         // keep compatible with the old logic. Should remove the isOpenScanner in the future.
         // 2. Server tells us that it has no more results for this region.
    -    return (values.length == 0 && !callable.isHeartbeatMessage()) ||
    -        callable.moreResultsInRegion() == MoreResults.NO;
    +    return (values.length == 0 && !callable.isHeartbeatMessage())
    +        || callable.moreResultsInRegion() == MoreResults.NO;
       }
     
       private void closeScannerIfExhausted(boolean exhausted) throws IOException {
    @@ -362,10 +363,10 @@ private void handleScanError(DoNotRetryIOException e,
         // If exception is any but the list below throw it back to the client; else setup
         // the scanner and retry.
         Throwable cause = e.getCause();
    -    if ((cause != null && cause instanceof NotServingRegionException) ||
    -        (cause != null && cause instanceof RegionServerStoppedException) ||
    -        e instanceof OutOfOrderScannerNextException || e instanceof UnknownScannerException ||
    -        e instanceof ScannerResetException || e instanceof LeaseException) {
    +    if ((cause != null && cause instanceof NotServingRegionException)
    +        || (cause != null && cause instanceof RegionServerStoppedException)
    +        || e instanceof OutOfOrderScannerNextException || e instanceof UnknownScannerException
    +        || e instanceof ScannerResetException || e instanceof LeaseException) {
           // Pass. It is easier writing the if loop test as list of what is allowed rather than
           // as a list of what is not allowed... so if in here, it means we do not throw.
           if (retriesLeft <= 0) {
    @@ -489,8 +490,8 @@ protected void loadCache() throws IOException {
               // processing of the scan is taking a long time server side. Rather than continue to
               // loop until a limit (e.g. size or caching) is reached, break out early to avoid causing
               // unnecesary delays to the caller
    -          LOG.trace("Heartbeat message received and cache contains Results. " +
    -            "Breaking out of scan loop");
    +          LOG.trace("Heartbeat message received and cache contains Results. "
    +              + "Breaking out of scan loop");
               // we know that the region has not been exhausted yet so just break without calling
               // closeScannerIfExhausted
               break;
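The constructor hunk above reads the client-side scanner timeout and the maximum result size per scanner RPC from the configuration. For reference, both can be tuned before creating a connection; the key names below are the standard HBase client keys referenced through HConstants in the code above, while the values are arbitrary examples:

// Sketch: tuning the two client scanner settings used by ClientScanner.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ScannerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // How long the client waits on a single scanner call before treating it as timed out (ms).
    conf.setInt("hbase.client.scanner.timeout.period", 60_000);
    // Upper bound on the amount of data returned per scanner RPC (bytes).
    conf.setLong("hbase.client.scanner.max.result.size", 2L * 1024 * 1024);
    return conf;
  }
}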
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
    index f118e7a03f7c..235002ec4cbe 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientServiceCallable.java
    @@ -1,12 +1,13 @@
     /*
    - * Licensed to the Apache Software Foundation (ASF) under one or more
    - * contributor license agreements.  See the NOTICE file distributed with
    - * this work for additional information regarding copyright ownership.
    - * The ASF licenses this file to you under the Apache License, Version 2.0
    - * (the "License"); you may not use this file except in compliance with
    - * the License.  You may obtain a copy of the License at
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements.  See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership.  The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License.  You may obtain a copy of the License at
      *
    - * http://www.apache.org/licenses/LICENSE-2.0
    + *     http://www.apache.org/licenses/LICENSE-2.0
      *
      * Unless required by applicable law or agreed to in writing, software
      * distributed under the License is distributed on an "AS IS" BASIS,
    @@ -17,24 +18,24 @@
     package org.apache.hadoop.hbase.client;
     
     import java.io.IOException;
    -
     import org.apache.hadoop.hbase.ServerName;
     import org.apache.hadoop.hbase.TableName;
     import org.apache.yetus.audience.InterfaceAudience;
    -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    +
     import org.apache.hbase.thirdparty.com.google.protobuf.RpcController;
     
    +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
    +
     /**
    - * A RegionServerCallable set to use the Client protocol.
    - * Also includes some utility methods so can hide protobuf references here rather than have them
    - * sprinkled about the code base.
    + * A RegionServerCallable set to use the Client protocol. Also includes some utility methods so can
    + * hide protobuf references here rather than have them sprinkled about the code base.
      * @param 
      */
     @InterfaceAudience.Private
    -public abstract class ClientServiceCallable extends
    -    RegionServerCallable {
    +public abstract class ClientServiceCallable
    +    extends RegionServerCallable {
     
    -  public ClientServiceCallable(Connection connection, TableName tableName, byte [] row,
    +  public ClientServiceCallable(Connection connection, TableName tableName, byte[] row,
           RpcController rpcController, int priority) {
         super(connection, tableName, row, rpcController, priority);
       }
    @@ -46,12 +47,12 @@ protected void setStubByServiceName(ServerName serviceName) throws IOException {
     
       // Below here are simple methods that contain the stub and the rpcController.
       protected ClientProtos.GetResponse doGet(ClientProtos.GetRequest request)
    -  throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
    +      throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
         return getStub().get(getRpcController(), request);
       }
     
       protected ClientProtos.MutateResponse doMutate(ClientProtos.MutateRequest request)
    -  throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
    +      throws org.apache.hbase.thirdparty.com.google.protobuf.ServiceException {
         return getStub().mutate(getRpcController(), request);
       }
     }
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
    index 2211f8696efd..35735507c0f3 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientSimpleScanner.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -23,16 +23,14 @@
     
     import java.io.IOException;
     import java.util.concurrent.ExecutorService;
    -
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.TableName;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * ClientSimpleScanner implements a sync scanner behaviour.
    - * The cache is a simple list.
    - * The prefetch is invoked only when the application finished processing the entire cache.
    + * ClientSimpleScanner implements a sync scanner behaviour. The cache is a simple list. The prefetch
    + * is invoked only when the application finished processing the entire cache.
      */
     @InterfaceAudience.Private
     public class ClientSimpleScanner extends ClientScanner {
    @@ -61,6 +59,6 @@ protected ScannerCallable createScannerCallable() {
           scan.withStartRow(createClosestRowAfter(scan.getStartRow()), true);
         }
         return new ScannerCallable(getConnection(), getTable(), scan, this.scanMetrics,
    -      this.rpcControllerFactory, getScanReplicaId());
    +        this.rpcControllerFactory, getScanReplicaId());
       }
     }
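The hunk above converts an exclusive start row into an inclusive one via createClosestRowAfter. The idea is that the smallest row sorting strictly after a given row is that row with a 0x00 byte appended; a minimal illustrative sketch (not the ConnectionUtils implementation):

// Sketch: the closest row after 'row' in byte-lexicographic order is 'row' + 0x00.
import java.util.Arrays;

final class RowKeySketch {
  static byte[] closestRowAfter(byte[] row) {
    return Arrays.copyOf(row, row.length + 1); // the appended trailing byte defaults to 0x00
  }
}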
    diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    index ba447d5a81ba..44eef0668f03 100644
    --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientUtil.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -25,7 +25,6 @@
     @InterfaceAudience.Private
     public class ClientUtil {
     
    -
       public static boolean areScanStartRowAndStopRowEqual(byte[] startRow, byte[] stopRow) {
         return startRow != null && startRow.length > 0 && Bytes.equals(startRow, stopRow);
       }
    @@ -35,19 +34,23 @@ public static Cursor createCursor(byte[] row) {
       }
     
       /**
-   * <p>
-   * When scanning for a prefix the scan should stop immediately after the the last row that
-   * has the specified prefix. This method calculates the closest next rowKey immediately following
-   * the given rowKeyPrefix.
-   * <p>
-   * IMPORTANT: This converts a rowKeyPrefix into a rowKey.
-   * <p>
-   * If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can
-   * simply increment the last byte of the array.
-   * But if your application uses real binary rowids you may run into the scenario that your
-   * prefix is something like:
+   * <p>
+   * When scanning for a prefix the scan should stop immediately after the the last row that has the
+   * specified prefix. This method calculates the closest next rowKey immediately following the
+   * given rowKeyPrefix.
+   * <p>
+   * IMPORTANT: This converts a rowKeyPrefix into a rowKey.
+   * <p>
+   * If the prefix is an 'ASCII' string put into a byte[] then this is easy because you can simply
+   * increment the last byte of the array. But if your application uses real binary rowids you may
+   * run into the scenario that your prefix is something like:
    *    { 0x12, 0x23, 0xFF, 0xFF }
    * Then this stopRow needs to be fed into the actual scan
    *    { 0x12, 0x24 } (Notice that it is shorter now)
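A minimal sketch of the stop-row calculation this javadoc describes (strip trailing 0xFF bytes, increment the last remaining byte, and use an empty stop row when the prefix is all 0xFF); it mirrors the documented behaviour rather than the exact ClientUtil code:

// Sketch: compute the stop row for a prefix scan.
import java.util.Arrays;

final class PrefixStopRowSketch {
  static byte[] stopRowForPrefix(byte[] rowKeyPrefix) {
    int offset = rowKeyPrefix.length;
    while (offset > 0 && rowKeyPrefix[offset - 1] == (byte) 0xFF) {
      offset--;                      // trailing 0xFF bytes cannot be incremented, drop them
    }
    if (offset == 0) {
      return new byte[0];            // prefix is all 0xFF: empty stop row means "end of table"
    }
    byte[] stopRow = Arrays.copyOf(rowKeyPrefix, offset);
    stopRow[offset - 1]++;           // e.g. { 0x12, 0x23, 0xFF, 0xFF } becomes { 0x12, 0x24 }
    return stopRow;
  }
}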
    * This method calculates the correct stop row value for this usecase. - * * @param rowKeyPrefix the rowKeyPrefix. * @return the closest next rowKey immediately following the given rowKeyPrefix. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java index 277056137f76..7b83def86828 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterConnection.java @@ -1,6 +1,4 @@ -/** - * - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +42,8 @@ public interface ClusterConnection extends Connection { /** - * Key for configuration in Configuration whose value is the class we implement making a - * new Connection instance. + * Key for configuration in Configuration whose value is the class we implement making a new + * Connection instance. */ String HBASE_CLIENT_CONNECTION_IMPL = "hbase.client.connection.impl"; @@ -54,28 +52,21 @@ public interface ClusterConnection extends Connection { * @deprecated this has been deprecated without a replacement */ @Deprecated - boolean isMasterRunning() - throws MasterNotRunningException, ZooKeeperConnectionException; + boolean isMasterRunning() throws MasterNotRunningException, ZooKeeperConnectionException; /** - * Use this api to check if the table has been created with the specified number of - * splitkeys which was used while creating the given table. - * Note : If this api is used after a table's region gets splitted, the api may return - * false. - * @param tableName - * tableName - * @param splitKeys - * splitKeys used while creating table - * @throws IOException - * if a remote or network exception occurs + * Use this api to check if the table has been created with the specified number of splitkeys + * which was used while creating the given table. Note : If this api is used after a table's + * region gets splitted, the api may return false. + * @param tableName tableName + * @param splitKeys splitKeys used while creating table + * @throws IOException if a remote or network exception occurs */ - boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws - IOException; + boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws IOException; /** - * A table that isTableEnabled == false and isTableDisabled == false - * is possible. This happens when a table has a lot of regions - * that must be processed. + * A table that isTableEnabled == false and isTableDisabled == false is possible. This happens + * when a table has a lot of regions that must be processed. * @param tableName table name * @return true if the table is enabled, false otherwise * @throws IOException if a remote or network exception occurs @@ -94,19 +85,16 @@ boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws * @param tableName table state for * @return state of the table */ - TableState getTableState(TableName tableName) throws IOException; + TableState getTableState(TableName tableName) throws IOException; /** - * Find the location of the region of tableName that row - * lives in. + * Find the location of the region of tableName that row lives in. 
* @param tableName name of the table row is in * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question + * @return HRegionLocation that describes where to find the region in question * @throws IOException if a remote or network exception occurs */ - HRegionLocation locateRegion(final TableName tableName, - final byte [] row) throws IOException; + HRegionLocation locateRegion(final TableName tableName, final byte[] row) throws IOException; /** * @deprecated {@link #clearRegionLocationCache()} instead. @@ -119,10 +107,8 @@ default void clearRegionCache() { void cacheLocation(final TableName tableName, final RegionLocations location); /** - * Allows flushing the region cache of all locations that pertain to - * tableName - * @param tableName Name of the table whose regions we are to remove from - * cache. + * Allows flushing the region cache of all locations that pertain to tableName + * @param tableName Name of the table whose regions we are to remove from cache. */ void clearRegionCache(final TableName tableName); @@ -133,33 +119,30 @@ default void clearRegionCache() { void deleteCachedRegionLocation(final HRegionLocation location); /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. + * Find the location of the region of tableName that row lives in, ignoring any + * value that might be in the cache. * @param tableName name of the table row is in * @param row row key you're trying to find the region of - * @return HRegionLocation that describes where to find the region in - * question + * @return HRegionLocation that describes where to find the region in question * @throws IOException if a remote or network exception occurs */ - HRegionLocation relocateRegion(final TableName tableName, - final byte [] row) throws IOException; + HRegionLocation relocateRegion(final TableName tableName, final byte[] row) throws IOException; /** - * Find the location of the region of tableName that row - * lives in, ignoring any value that might be in the cache. + * Find the location of the region of tableName that row lives in, ignoring any + * value that might be in the cache. * @param tableName name of the table row is in * @param row row key you're trying to find the region of * @param replicaId the replicaId of the region - * @return RegionLocations that describe where to find the region in - * question + * @return RegionLocations that describe where to find the region in question * @throws IOException if a remote or network exception occurs */ - RegionLocations relocateRegion(final TableName tableName, - final byte [] row, int replicaId) throws IOException; + RegionLocations relocateRegion(final TableName tableName, final byte[] row, int replicaId) + throws IOException; /** * Update the location cache. This is used internally by HBase, in most cases it should not be - * used by the client application. + * used by the client application. * @param tableName the table name * @param regionName the region name * @param rowkey the row @@ -167,13 +150,12 @@ RegionLocations relocateRegion(final TableName tableName, * @param source the previous location */ void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey, - Object exception, ServerName source); + Object exception, ServerName source); /** * Gets the location of the region of regionName. 
* @param regionName name of the region to locate - * @return HRegionLocation that describes where to find the region in - * question + * @return HRegionLocation that describes where to find the region in question * @throws IOException if a remote or network exception occurs */ HRegionLocation locateRegion(final byte[] regionName) throws IOException; @@ -195,12 +177,10 @@ void updateCachedLocations(TableName tableName, byte[] regionName, byte[] rowkey * @return list of region locations for all regions of table * @throws IOException if IO failure occurs */ - List locateRegions(final TableName tableName, - final boolean useCache, + List locateRegions(final TableName tableName, final boolean useCache, final boolean offlined) throws IOException; /** - * * @param tableName table to get regions of * @param row the row * @param useCache Should we use the cache to retrieve the region information. @@ -208,21 +188,20 @@ List locateRegions(final TableName tableName, * @return region locations for this row. * @throws IOException if IO failure occurs */ - RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry) throws IOException; - - /** - * - * @param tableName table to get regions of - * @param row the row - * @param useCache Should we use the cache to retrieve the region information. - * @param retry do we retry - * @param replicaId the replicaId for the region - * @return region locations for this row. - * @throws IOException if IO failure occurs - */ + RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry) + throws IOException; + + /** + * @param tableName table to get regions of + * @param row the row + * @param useCache Should we use the cache to retrieve the region information. + * @param retry do we retry + * @param replicaId the replicaId for the region + * @return region locations for this row. + * @throws IOException if IO failure occurs + */ RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, boolean retry, - int replicaId) throws IOException; + int replicaId) throws IOException; /** * Returns a {@link MasterKeepAliveConnection} to the active master @@ -243,13 +222,11 @@ RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, AdminService.BlockingInterface getAdmin(final ServerName serverName) throws IOException; /** - * Establishes a connection to the region server at the specified address, and returns - * a region client protocol. - * + * Establishes a connection to the region server at the specified address, and returns a region + * client protocol. * @param serverName the region server to connect to * @return ClientProtocol proxy for RegionServer * @throws IOException if a remote or network exception occurs - * */ ClientService.BlockingInterface getClient(final ServerName serverName) throws IOException; @@ -281,8 +258,8 @@ HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reloa AsyncProcess getAsyncProcess(); /** - * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. - * This RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be + * Returns a new RpcRetryingCallerFactory from the given {@link Configuration}. 
This + * RpcRetryingCallerFactory lets the users create {@link RpcRetryingCaller}s which can be * intercepted with the configured {@link RetryingCallerInterceptor} * @param conf configuration * @return RpcRetryingCallerFactory @@ -329,7 +306,7 @@ HRegionLocation getRegionLocation(TableName tableName, byte[] row, boolean reloa * Get live region servers from masters. */ List getLiveRegionServers(Supplier masterAddrTracker, int count) - throws IOException; + throws IOException; /** * Get the bootstrap node list of another region server. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java index 1370d07c5fb3..3eab89fee810 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClusterStatusListener.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.Closeable; @@ -37,6 +35,10 @@ import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.ExceptionUtil; import org.apache.hadoop.hbase.util.Threads; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap; import org.apache.hbase.thirdparty.io.netty.buffer.ByteBufInputStream; @@ -48,15 +50,13 @@ import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramChannel; import org.apache.hbase.thirdparty.io.netty.channel.socket.DatagramPacket; import org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioDatagramChannel; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * A class that receives the cluster status, and provide it as a set of service to the client. - * Today, manages only the dead server list. - * The class is abstract to allow multiple implementations, from ZooKeeper to multicast based. + * Today, manages only the dead server list. The class is abstract to allow multiple + * implementations, from ZooKeeper to multicast based. */ @InterfaceAudience.Private class ClusterStatusListener implements Closeable { @@ -80,13 +80,11 @@ public interface DeadServerHandler { /** * Called when a server is identified as dead. Called only once even if we receive the * information multiple times. - * * @param sn - the server name */ void newDead(ServerName sn); } - /** * The interface to be implemented by a listener of a cluster status event. */ @@ -99,7 +97,6 @@ interface Listener extends Closeable { /** * Called to connect. - * * @param conf Configuration to use. 
* @throws IOException if failing to connect */ @@ -107,7 +104,7 @@ interface Listener extends Closeable { } public ClusterStatusListener(DeadServerHandler dsh, Configuration conf, - Class listenerClass) throws IOException { + Class listenerClass) throws IOException { this.deadServerHandler = dsh; try { Constructor ctor = @@ -128,7 +125,6 @@ public ClusterStatusListener(DeadServerHandler dsh, Configuration conf, /** * Acts upon the reception of a new cluster status. - * * @param ncs the cluster status */ public void receive(ClusterMetrics ncs) { @@ -152,7 +148,6 @@ public void close() { /** * Check if we know if a server is dead. - * * @param sn the server name to check. * @return true if we know for sure that the server is dead, false otherwise. */ @@ -162,9 +157,8 @@ public boolean isDeadServer(ServerName sn) { } for (ServerName dead : deadServers) { - if (dead.getStartcode() >= sn.getStartcode() && - dead.getPort() == sn.getPort() && - dead.getHostname().equals(sn.getHostname())) { + if (dead.getStartcode() >= sn.getStartcode() && dead.getPort() == sn.getPort() + && dead.getHostname().equals(sn.getHostname())) { return true; } } @@ -172,7 +166,6 @@ public boolean isDeadServer(ServerName sn) { return false; } - /** * An implementation using a multicast message between the master & the client. */ @@ -180,8 +173,9 @@ public boolean isDeadServer(ServerName sn) { class MulticastListener implements Listener { private DatagramChannel channel; private final EventLoopGroup group = new NioEventLoopGroup(1, - new ThreadFactoryBuilder().setNameFormat("hbase-client-clusterStatusListener-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadFactoryBuilder().setNameFormat("hbase-client-clusterStatusListener-pool-%d") + .setDaemon(true).setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER) + .build()); public MulticastListener() { } @@ -190,11 +184,11 @@ public MulticastListener() { public void connect(Configuration conf) throws IOException { String mcAddress = conf.get(HConstants.STATUS_MULTICAST_ADDRESS, - HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); + HConstants.DEFAULT_STATUS_MULTICAST_ADDRESS); String bindAddress = conf.get(HConstants.STATUS_MULTICAST_BIND_ADDRESS, HConstants.DEFAULT_STATUS_MULTICAST_BIND_ADDRESS); - int port = conf.getInt(HConstants.STATUS_MULTICAST_PORT, - HConstants.DEFAULT_STATUS_MULTICAST_PORT); + int port = + conf.getInt(HConstants.STATUS_MULTICAST_PORT, HConstants.DEFAULT_STATUS_MULTICAST_PORT); String niName = conf.get(HConstants.STATUS_MULTICAST_NI_NAME); InetAddress ina; @@ -207,11 +201,9 @@ public void connect(Configuration conf) throws IOException { try { Bootstrap b = new Bootstrap(); - b.group(group) - .channel(NioDatagramChannel.class) - .option(ChannelOption.SO_REUSEADDR, true) - .handler(new ClusterStatusHandler()); - channel = (DatagramChannel)b.bind(bindAddress, port).sync().channel(); + b.group(group).channel(NioDatagramChannel.class).option(ChannelOption.SO_REUSEADDR, true) + .handler(new ClusterStatusHandler()); + channel = (DatagramChannel) b.bind(bindAddress, port).sync().channel(); } catch (InterruptedException e) { close(); throw ExceptionUtil.asInterrupt(e); @@ -228,7 +220,6 @@ public void connect(Configuration conf) throws IOException { channel.joinGroup(ina, ni, null, channel.newPromise()); } - @Override public void close() { if (channel != null) { @@ -238,17 +229,13 @@ public void close() { group.shutdownGracefully(); } - - /** * Class, conforming to the Netty framework, that manages 
the message received. */ private class ClusterStatusHandler extends SimpleChannelInboundHandler { @Override - public void exceptionCaught( - ChannelHandlerContext ctx, Throwable cause) - throws Exception { + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { LOG.error("Unexpected exception, continuing.", cause); } @@ -257,7 +244,6 @@ public boolean acceptInboundMessage(Object msg) throws Exception { return super.acceptInboundMessage(msg); } - @Override protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket dp) throws Exception { ByteBufInputStream bis = new ByteBufInputStream(dp.content()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java index 001d672620ea..806b7d4ec371 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,71 +20,68 @@ import java.util.Comparator; import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.KeepDeletedCells; import org.apache.hadoop.hbase.MemoryCompactionPolicy; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * An ColumnFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. - * - * To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods + * An ColumnFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. To construct a new instance, use the {@link ColumnFamilyDescriptorBuilder} methods * @since 2.0.0 */ @InterfaceAudience.Public public interface ColumnFamilyDescriptor { @InterfaceAudience.Private - static final Comparator COMPARATOR - = (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { - int result = Bytes.compareTo(lhs.getName(), rhs.getName()); - if (result != 0) { - return result; - } - // punt on comparison for ordering, just calculate difference. - result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); - if (result != 0) { - return result; - } - return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); - }; - - static final Bytes REPLICATION_SCOPE_BYTES = new Bytes( - Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); + static final Comparator COMPARATOR = + (ColumnFamilyDescriptor lhs, ColumnFamilyDescriptor rhs) -> { + int result = Bytes.compareTo(lhs.getName(), rhs.getName()); + if (result != 0) { + return result; + } + // punt on comparison for ordering, just calculate difference. 
+ result = lhs.getValues().hashCode() - rhs.getValues().hashCode(); + if (result != 0) { + return result; + } + return lhs.getConfiguration().hashCode() - rhs.getConfiguration().hashCode(); + }; + + static final Bytes REPLICATION_SCOPE_BYTES = + new Bytes(Bytes.toBytes(ColumnFamilyDescriptorBuilder.REPLICATION_SCOPE)); @InterfaceAudience.Private - static final Comparator COMPARATOR_IGNORE_REPLICATION = ( - ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { - int result = Bytes.compareTo(lcf.getName(), rcf.getName()); - if (result != 0) { - return result; - } - // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove - // REPLICATION_SCOPE_BYTES - Map lValues = new HashMap<>(); - lValues.putAll(lcf.getValues()); - lValues.remove(REPLICATION_SCOPE_BYTES); - Map rValues = new HashMap<>(); - rValues.putAll(rcf.getValues()); - rValues.remove(REPLICATION_SCOPE_BYTES); - result = lValues.hashCode() - rValues.hashCode(); - if (result != 0) { - return result; - } - return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); - }; + static final Comparator COMPARATOR_IGNORE_REPLICATION = + (ColumnFamilyDescriptor lcf, ColumnFamilyDescriptor rcf) -> { + int result = Bytes.compareTo(lcf.getName(), rcf.getName()); + if (result != 0) { + return result; + } + // ColumnFamilyDescriptor.getValues is a immutable map, so copy it and remove + // REPLICATION_SCOPE_BYTES + Map lValues = new HashMap<>(); + lValues.putAll(lcf.getValues()); + lValues.remove(REPLICATION_SCOPE_BYTES); + Map rValues = new HashMap<>(); + rValues.putAll(rcf.getValues()); + rValues.remove(REPLICATION_SCOPE_BYTES); + result = lValues.hashCode() - rValues.hashCode(); + if (result != 0) { + return result; + } + return lcf.getConfiguration().hashCode() - rcf.getConfiguration().hashCode(); + }; /** * @return The storefile/hfile blocksize for this column family. */ int getBlocksize(); + /** * @return bloom filter type used for new StoreFiles in ColumnFamily */ @@ -114,20 +111,23 @@ public interface ColumnFamilyDescriptor { * @return an unmodifiable map. */ Map getConfiguration(); + /** * @param key the key whose associated value is to be returned * @return accessing the configuration value by key. */ String getConfigurationValue(String key); + /** * @return replication factor set for this CF */ short getDFSReplication(); + /** - * @return the data block encoding algorithm used in block cache and - * optionally on disk + * @return the data block encoding algorithm used in block cache and optionally on disk */ DataBlockEncoding getDataBlockEncoding(); + /** * @return Return the raw crypto key attribute for the family, or null if not set */ @@ -137,35 +137,41 @@ public interface ColumnFamilyDescriptor { * @return Return the encryption algorithm in use by this family */ String getEncryptionType(); + /** - * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for - * for this column family + * @return in-memory compaction policy if set for the cf. Returns null if no policy is set for for + * this column family */ MemoryCompactionPolicy getInMemoryCompaction(); + /** * @return return the KeepDeletedCells */ KeepDeletedCells getKeepDeletedCells(); + /** * @return maximum number of versions */ int getMaxVersions(); + /** * @return The minimum number of versions to keep. 
*/ int getMinVersions(); + /** * Get the mob compact partition policy for this family * @return MobCompactPartitionPolicy */ MobCompactPartitionPolicy getMobCompactPartitionPolicy(); + /** - * Gets the mob threshold of the family. - * If the size of a cell value is larger than this threshold, it's regarded as a mob. - * The default threshold is 1024*100(100K)B. + * Gets the mob threshold of the family. If the size of a cell value is larger than this + * threshold, it's regarded as a mob. The default threshold is 1024*100(100K)B. * @return The mob threshold. */ long getMobThreshold(); + /** * @return a copy of Name of this column family */ @@ -176,45 +182,53 @@ public interface ColumnFamilyDescriptor { */ String getNameAsString(); - /** - * @return the scope tag - */ + /** + * @return the scope tag + */ int getScope(); + /** * Not using {@code enum} here because HDFS is not using {@code enum} for storage policy, see * org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite for more details. * @return Return the storage policy in use by this family */ String getStoragePolicy(); - /** + + /** * @return Time-to-live of cell contents, in seconds. */ int getTimeToLive(); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ Bytes getValue(Bytes key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ String getValue(String key); + /** * @param key The key. * @return A clone value. Null if no mapping for the key */ byte[] getValue(byte[] key); + /** * It clone all bytes of all elements. * @return All values */ Map getValues(); + /** * @return True if hfile DATA type blocks should be cached (You cannot disable caching of INDEX - * and BLOOM type blocks). + * and BLOOM type blocks). */ boolean isBlockCacheEnabled(); + /** * @return true if we should cache bloomfilter blocks on write */ @@ -224,29 +238,35 @@ public interface ColumnFamilyDescriptor { * @return true if we should cache data blocks on write */ boolean isCacheDataOnWrite(); + /** * @return true if we should cache index blocks on write */ boolean isCacheIndexesOnWrite(); + /** * @return Whether KV tags should be compressed along with DataBlockEncoding. When no * DataBlockEncoding is been used, this is having no effect. */ boolean isCompressTags(); + /** * @return true if we should evict cached blocks from the blockcache on close */ boolean isEvictBlocksOnClose(); + /** - * @return True if we are to favor keeping all values for this column family in the - * HRegionServer cache. + * @return True if we are to favor keeping all values for this column family in the HRegionServer + * cache. */ boolean isInMemory(); + /** * Gets whether the mob is enabled for the family. * @return True if the mob is enabled for the family. */ boolean isMobEnabled(); + /** * @return true if we should prefetch blocks into the blockcache on open */ @@ -258,9 +278,9 @@ public interface ColumnFamilyDescriptor { String toStringCustomizedValues(); /** - * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * By default, HBase only consider timestamp in versions. So a previous Delete with higher ts will + * mask a later Put with lower ts. Set this to true to enable new semantics of versions. We will + * also consider mvcc in versions. See HBASE-15968 for details. 
*/ boolean isNewVersionBehavior(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java index 5dccd0b40c5c..47142a874dfd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,20 +49,21 @@ public class ColumnFamilyDescriptorBuilder { // For future backward compatibility - // Version 3 was when column names become byte arrays and when we picked up - // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. - // Version 5 was when bloom filter descriptors were removed. - // Version 6 adds metadata as a map where keys and values are byte[]. - // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) - // Version 8 -- reintroduction of bloom filters, changed from boolean to enum - // Version 9 -- add data block encoding + // Version 3 was when column names become byte arrays and when we picked up + // Time-to-live feature. Version 4 was when we moved to byte arrays, HBASE-82. + // Version 5 was when bloom filter descriptors were removed. + // Version 6 adds metadata as a map where keys and values are byte[]. + // Version 7 -- add new compression and hfile blocksize to HColumnDescriptor (HBASE-1217) + // Version 8 -- reintroduction of bloom filters, changed from boolean to enum + // Version 9 -- add data block encoding // Version 10 -- change metadata to standard type. // Version 11 -- add column family level configuration. 
private static final byte COLUMN_DESCRIPTOR_VERSION = (byte) 11; @InterfaceAudience.Private public static final String IN_MEMORY_COMPACTION = "IN_MEMORY_COMPACTION"; - private static final Bytes IN_MEMORY_COMPACTION_BYTES = new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); + private static final Bytes IN_MEMORY_COMPACTION_BYTES = + new Bytes(Bytes.toBytes(IN_MEMORY_COMPACTION)); @InterfaceAudience.Private public static final String IN_MEMORY = HConstants.IN_MEMORY; @@ -74,53 +75,59 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes COMPRESSION_BYTES = new Bytes(Bytes.toBytes(COMPRESSION)); @InterfaceAudience.Private public static final String COMPRESSION_COMPACT = "COMPRESSION_COMPACT"; - private static final Bytes COMPRESSION_COMPACT_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); + private static final Bytes COMPRESSION_COMPACT_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT)); public static final String COMPRESSION_COMPACT_MAJOR = "COMPRESSION_COMPACT_MAJOR"; - private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); + private static final Bytes COMPRESSION_COMPACT_MAJOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MAJOR)); public static final String COMPRESSION_COMPACT_MINOR = "COMPRESSION_COMPACT_MINOR"; - private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); + private static final Bytes COMPRESSION_COMPACT_MINOR_BYTES = + new Bytes(Bytes.toBytes(COMPRESSION_COMPACT_MINOR)); @InterfaceAudience.Private public static final String DATA_BLOCK_ENCODING = "DATA_BLOCK_ENCODING"; - private static final Bytes DATA_BLOCK_ENCODING_BYTES = new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); + private static final Bytes DATA_BLOCK_ENCODING_BYTES = + new Bytes(Bytes.toBytes(DATA_BLOCK_ENCODING)); /** - * Key for the BLOCKCACHE attribute. A more exact name would be - * CACHE_DATA_ON_READ because this flag sets whether or not we cache DATA - * blocks. We always cache INDEX and BLOOM blocks; caching these blocks cannot - * be disabled. + * Key for the BLOCKCACHE attribute. A more exact name would be CACHE_DATA_ON_READ because this + * flag sets whether or not we cache DATA blocks. We always cache INDEX and BLOOM blocks; caching + * these blocks cannot be disabled. 
*/ @InterfaceAudience.Private public static final String BLOCKCACHE = "BLOCKCACHE"; private static final Bytes BLOCKCACHE_BYTES = new Bytes(Bytes.toBytes(BLOCKCACHE)); @InterfaceAudience.Private public static final String CACHE_DATA_ON_WRITE = "CACHE_DATA_ON_WRITE"; - private static final Bytes CACHE_DATA_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); + private static final Bytes CACHE_DATA_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_DATA_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_INDEX_ON_WRITE = "CACHE_INDEX_ON_WRITE"; - private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); + private static final Bytes CACHE_INDEX_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_INDEX_ON_WRITE)); @InterfaceAudience.Private public static final String CACHE_BLOOMS_ON_WRITE = "CACHE_BLOOMS_ON_WRITE"; - private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); + private static final Bytes CACHE_BLOOMS_ON_WRITE_BYTES = + new Bytes(Bytes.toBytes(CACHE_BLOOMS_ON_WRITE)); @InterfaceAudience.Private public static final String EVICT_BLOCKS_ON_CLOSE = "EVICT_BLOCKS_ON_CLOSE"; - private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); + private static final Bytes EVICT_BLOCKS_ON_CLOSE_BYTES = + new Bytes(Bytes.toBytes(EVICT_BLOCKS_ON_CLOSE)); /** - * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, - * and DATA blocks of HFiles belonging to this family will be loaded into the - * cache as soon as the file is opened. These loads will not count as cache - * misses. + * Key for the PREFETCH_BLOCKS_ON_OPEN attribute. If set, all INDEX, BLOOM, and DATA blocks of + * HFiles belonging to this family will be loaded into the cache as soon as the file is opened. + * These loads will not count as cache misses. */ @InterfaceAudience.Private public static final String PREFETCH_BLOCKS_ON_OPEN = "PREFETCH_BLOCKS_ON_OPEN"; - private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); + private static final Bytes PREFETCH_BLOCKS_ON_OPEN_BYTES = + new Bytes(Bytes.toBytes(PREFETCH_BLOCKS_ON_OPEN)); /** - * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. - * Use smaller block sizes for faster random-access at expense of larger - * indices (more memory consumption). Note that this is a soft limit and that - * blocks have overhead (metadata, CRCs) so blocks will tend to be the size - * specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k - * means hbase data will align with an SSDs 4k page accesses (TODO). + * Size of storefile/hfile 'blocks'. Default is {@link #DEFAULT_BLOCKSIZE}. Use smaller block + * sizes for faster random-access at expense of larger indices (more memory consumption). Note + * that this is a soft limit and that blocks have overhead (metadata, CRCs) so blocks will tend to + * be the size specified here and then some; i.e. don't expect that setting BLOCKSIZE=4k means + * hbase data will align with an SSDs 4k page accesses (TODO). 
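For context on the caching and block-size attributes whose javadoc is reflowed above (BLOCKCACHE, CACHE_*_ON_WRITE, PREFETCH_BLOCKS_ON_OPEN, BLOCKSIZE), here is a minimal sketch of how they are typically set through the public ColumnFamilyDescriptorBuilder API. The class name, the family name "cf", and the chosen values are illustrative only, not part of the patch.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BlockSettingsExample {
  public static void main(String[] args) {
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setBlockCacheEnabled(true)      // BLOCKCACHE: cache DATA blocks (INDEX/BLOOM are always cached)
        .setCacheDataOnWrite(false)      // CACHE_DATA_ON_WRITE
        .setPrefetchBlocksOnOpen(false)  // PREFETCH_BLOCKS_ON_OPEN
        .setBlocksize(64 * 1024)         // BLOCKSIZE: a soft limit; blocks carry extra metadata/CRCs
        .build();
    System.out.println(cf.getBlocksize()); // 65536
  }
}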
*/ @InterfaceAudience.Private public static final String BLOCKSIZE = "BLOCKSIZE"; @@ -141,13 +148,14 @@ public class ColumnFamilyDescriptorBuilder { public static final String MIN_VERSIONS = "MIN_VERSIONS"; private static final Bytes MIN_VERSIONS_BYTES = new Bytes(Bytes.toBytes(MIN_VERSIONS)); /** - * Retain all cells across flushes and compactions even if they fall behind a - * delete tombstone. To see all retained cells, do a 'raw' scan; see - * Scan#setRaw or pass RAW => true attribute in the shell. + * Retain all cells across flushes and compactions even if they fall behind a delete tombstone. To + * see all retained cells, do a 'raw' scan; see Scan#setRaw or pass RAW => true attribute in + * the shell. */ @InterfaceAudience.Private public static final String KEEP_DELETED_CELLS = "KEEP_DELETED_CELLS"; - private static final Bytes KEEP_DELETED_CELLS_BYTES = new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); + private static final Bytes KEEP_DELETED_CELLS_BYTES = + new Bytes(Bytes.toBytes(KEEP_DELETED_CELLS)); @InterfaceAudience.Private public static final String COMPRESS_TAGS = "COMPRESS_TAGS"; private static final Bytes COMPRESS_TAGS_BYTES = new Bytes(Bytes.toBytes(COMPRESS_TAGS)); @@ -168,9 +176,10 @@ public class ColumnFamilyDescriptorBuilder { public static final long DEFAULT_MOB_THRESHOLD = 100 * 1024; // 100k @InterfaceAudience.Private public static final String MOB_COMPACT_PARTITION_POLICY = "MOB_COMPACT_PARTITION_POLICY"; - private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); - public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY - = MobCompactPartitionPolicy.DAILY; + private static final Bytes MOB_COMPACT_PARTITION_POLICY_BYTES = + new Bytes(Bytes.toBytes(MOB_COMPACT_PARTITION_POLICY)); + public static final MobCompactPartitionPolicy DEFAULT_MOB_COMPACT_PARTITION_POLICY = + MobCompactPartitionPolicy.DAILY; @InterfaceAudience.Private public static final String DFS_REPLICATION = "DFS_REPLICATION"; private static final Bytes DFS_REPLICATION_BYTES = new Bytes(Bytes.toBytes(DFS_REPLICATION)); @@ -180,7 +189,8 @@ public class ColumnFamilyDescriptorBuilder { private static final Bytes STORAGE_POLICY_BYTES = new Bytes(Bytes.toBytes(STORAGE_POLICY)); public static final String NEW_VERSION_BEHAVIOR = "NEW_VERSION_BEHAVIOR"; - private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); + private static final Bytes NEW_VERSION_BEHAVIOR_BYTES = + new Bytes(Bytes.toBytes(NEW_VERSION_BEHAVIOR)); public static final boolean DEFAULT_NEW_VERSION_BEHAVIOR = false; /** * Default compression type. @@ -203,8 +213,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_MIN_VERSIONS = 0; /** - * Default setting for whether to try and serve this column family from memory - * or not. + * Default setting for whether to try and serve this column family from memory or not. */ public static final boolean DEFAULT_IN_MEMORY = false; @@ -219,14 +228,12 @@ public class ColumnFamilyDescriptorBuilder { public static final boolean DEFAULT_BLOCKCACHE = true; /** - * Default setting for whether to cache data blocks on write if block caching - * is enabled. + * Default setting for whether to cache data blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_DATA_ON_WRITE = false; /** - * Default setting for whether to cache index blocks on write if block caching - * is enabled. 
+ * Default setting for whether to cache index blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_INDEX_ON_WRITE = false; @@ -241,8 +248,7 @@ public class ColumnFamilyDescriptorBuilder { public static final BloomType DEFAULT_BLOOMFILTER = BloomType.ROW; /** - * Default setting for whether to cache bloom filter blocks on write if block - * caching is enabled. + * Default setting for whether to cache bloom filter blocks on write if block caching is enabled. */ public static final boolean DEFAULT_CACHE_BLOOMS_ON_WRITE = false; @@ -257,8 +263,7 @@ public class ColumnFamilyDescriptorBuilder { public static final int DEFAULT_REPLICATION_SCOPE = HConstants.REPLICATION_SCOPE_LOCAL; /** - * Default setting for whether to evict cached blocks from the blockcache on - * close. + * Default setting for whether to evict cached blocks from the blockcache on close. */ public static final boolean DEFAULT_EVICT_BLOCKS_ON_CLOSE = false; @@ -276,7 +281,8 @@ public class ColumnFamilyDescriptorBuilder { private static Map getDefaultValuesBytes() { Map values = new HashMap<>(); - DEFAULT_VALUES.forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); + DEFAULT_VALUES + .forEach((k, v) -> values.put(new Bytes(Bytes.toBytes(k)), new Bytes(Bytes.toBytes(v)))); return values; } @@ -326,10 +332,10 @@ public static Unit getUnit(String key) { /** * @param b Family name. * @return b - * @throws IllegalArgumentException If not null and not a legitimate family - * name: i.e. 'printable' and ends in a ':' (Null passes are allowed because - * b can be null when deserializing). Cannot start with a '.' - * either. Also Family can not be an empty value or equal "recovered.edits". + * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' + * and ends in a ':' (Null passes are allowed because b can be null when + * deserializing). Cannot start with a '.' either. Also Family can not be an empty value + * or equal "recovered.edits". */ public static byte[] isLegalColumnFamilyName(final byte[] b) { if (b == null) { @@ -337,27 +343,28 @@ public static byte[] isLegalColumnFamilyName(final byte[] b) { } Preconditions.checkArgument(b.length != 0, "Column Family name can not be empty"); if (b[0] == '.') { - throw new IllegalArgumentException("Column Family names cannot start with a " - + "period: " + Bytes.toString(b)); + throw new IllegalArgumentException( + "Column Family names cannot start with a " + "period: " + Bytes.toString(b)); } for (int i = 0; i < b.length; i++) { if (Character.isISOControl(b[i]) || b[i] == ':' || b[i] == '\\' || b[i] == '/') { throw new IllegalArgumentException("Illegal character <" + b[i] - + ">. Column Family names cannot contain control characters or colons: " - + Bytes.toString(b)); + + ">. 
Column Family names cannot contain control characters or colons: " + + Bytes.toString(b)); } } byte[] recoveredEdit = Bytes.toBytes(HConstants.RECOVERED_EDITS_DIR); if (Bytes.equals(recoveredEdit, b)) { - throw new IllegalArgumentException("Column Family name cannot be: " - + HConstants.RECOVERED_EDITS_DIR); + throw new IllegalArgumentException( + "Column Family name cannot be: " + HConstants.RECOVERED_EDITS_DIR); } return b; } private final ModifyableColumnFamilyDescriptor desc; - public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) throws DeserializationException { + public static ColumnFamilyDescriptor parseFrom(final byte[] pbBytes) + throws DeserializationException { return ModifyableColumnFamilyDescriptor.parseFrom(pbBytes); } @@ -453,12 +460,14 @@ public ColumnFamilyDescriptorBuilder setCompactionCompressionType(Compression.Al return this; } - public ColumnFamilyDescriptorBuilder setMajorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMajorCompactionCompressionType(Compression.Algorithm value) { desc.setMajorCompactionCompressionType(value); return this; } - public ColumnFamilyDescriptorBuilder setMinorCompactionCompressionType(Compression.Algorithm value) { + public ColumnFamilyDescriptorBuilder + setMinorCompactionCompressionType(Compression.Algorithm value) { desc.setMinorCompactionCompressionType(value); return this; } @@ -532,7 +541,8 @@ public ColumnFamilyDescriptorBuilder setMinVersions(final int value) { return this; } - public ColumnFamilyDescriptorBuilder setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { + public ColumnFamilyDescriptorBuilder + setMobCompactPartitionPolicy(final MobCompactPartitionPolicy value) { desc.setMobCompactPartitionPolicy(value); return this; } @@ -599,11 +609,9 @@ public ColumnFamilyDescriptorBuilder setVersionsWithTimeToLive(final int retenti } /** - * An ModifyableFamilyDescriptor contains information about a column family such as the - * number of versions, compression settings, etc. - * - * It is used as input when creating a table or adding a column. - * TODO: make this package-private after removing the HColumnDescriptor + * An ModifyableFamilyDescriptor contains information about a column family such as the number of + * versions, compression settings, etc. It is used as input when creating a table or adding a + * column. TODO: make this package-private after removing the HColumnDescriptor */ @InterfaceAudience.Private public static class ModifyableColumnFamilyDescriptor @@ -616,20 +624,17 @@ public static class ModifyableColumnFamilyDescriptor private final Map values = new HashMap<>(); /** - * A map which holds the configuration specific to the column family. The - * keys of the map have the same names as config keys and override the - * defaults with cf-specific settings. Example usage may be for compactions, - * etc. + * A map which holds the configuration specific to the column family. The keys of the map have + * the same names as config keys and override the defaults with cf-specific settings. Example + * usage may be for compactions, etc. */ private final Map configuration = new HashMap<>(); /** - * Construct a column descriptor specifying only the family name The other - * attributes are defaulted. - * - * @param name Column family name. Must be 'printable' -- digit or - * letter -- and may not contain a : - * TODO: make this private after the HCD is removed. 
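The checks performed by isLegalColumnFamilyName are spelled out in the hunk above (no leading period, no colons or control characters, not "recovered.edits"). A small sketch of how the static helper behaves; the names used are illustrative.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyNameCheckExample {
  public static void main(String[] args) {
    // Passes: printable, no leading '.', no ':', not "recovered.edits".
    ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes("info"));

    try {
      ColumnFamilyDescriptorBuilder.isLegalColumnFamilyName(Bytes.toBytes(".meta"));
    } catch (IllegalArgumentException e) {
      // Names starting with a period are rejected, as are colons and control characters.
      System.out.println(e.getMessage());
    }
  }
}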
+ * Construct a column descriptor specifying only the family name The other attributes are + * defaulted. + * @param name Column family name. Must be 'printable' -- digit or letter -- and may not contain + * a : TODO: make this private after the HCD is removed. */ @InterfaceAudience.Private public ModifyableColumnFamilyDescriptor(final byte[] name) { @@ -637,8 +642,8 @@ public ModifyableColumnFamilyDescriptor(final byte[] name) { } /** - * Constructor. Makes a deep copy of the supplied descriptor. - * TODO: make this private after the HCD is removed. + * Constructor. Makes a deep copy of the supplied descriptor. TODO: make this private after the + * HCD is removed. * @param desc The descriptor. */ @InterfaceAudience.Private @@ -646,7 +651,8 @@ public ModifyableColumnFamilyDescriptor(ColumnFamilyDescriptor desc) { this(desc.getName(), desc.getValues(), desc.getConfiguration()); } - private ModifyableColumnFamilyDescriptor(byte[] name, Map values, Map config) { + private ModifyableColumnFamilyDescriptor(byte[] name, Map values, + Map config) { this.name = name; this.values.putAll(values); this.configuration.putAll(config); @@ -690,7 +696,8 @@ public Map getValues() { * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setValue(byte[] key, byte[] value) { - return setValue(toBytesOrNull(key, Function.identity()), toBytesOrNull(value, Function.identity())); + return setValue(toBytesOrNull(key, Function.identity()), + toBytesOrNull(value, Function.identity())); } public ModifyableColumnFamilyDescriptor setValue(String key, String value) { @@ -700,6 +707,7 @@ public ModifyableColumnFamilyDescriptor setValue(String key, String value) { private ModifyableColumnFamilyDescriptor setValue(Bytes key, String value) { return setValue(key, toBytesOrNull(value, Bytes::toBytes)); } + /** * @param key The key. * @param value The value. @@ -715,7 +723,6 @@ private ModifyableColumnFamilyDescriptor setValue(Bytes key, Bytes value) { } /** - * * @param key Key whose key and value we're to remove from HCD parameters. * @return this (for chained invocation) */ @@ -760,8 +767,8 @@ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { throw new IllegalArgumentException("Maximum versions must be positive"); } if (maxVersions < this.getMinVersions()) { - throw new IllegalArgumentException("Set MaxVersion to " + maxVersions - + " while minVersion is " + this.getMinVersions() + throw new IllegalArgumentException( + "Set MaxVersion to " + maxVersions + " while minVersion is " + this.getMinVersions() + ". 
Maximum versions must be >= minimum versions "); } setValue(MAX_VERSIONS_BYTES, Integer.toString(maxVersions)); @@ -770,7 +777,6 @@ public ModifyableColumnFamilyDescriptor setMaxVersions(int maxVersions) { /** * Set minimum and maximum versions to keep - * * @param minVersions minimal number of versions * @param maxVersions maximum number of versions * @return this (for chained invocation) @@ -783,8 +789,8 @@ public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVers } if (maxVersions < minVersions) { - throw new IllegalArgumentException("Unable to set MaxVersion to " + maxVersions - + " and set MinVersion to " + minVersions + throw new IllegalArgumentException( + "Unable to set MaxVersion to " + maxVersions + " and set MinVersion to " + minVersions + ", as maximum versions must be >= minimum versions."); } setMinVersions(minVersions); @@ -792,15 +798,13 @@ public ModifyableColumnFamilyDescriptor setVersions(int minVersions, int maxVers return this; } - @Override public int getBlocksize() { return getStringOrDefault(BLOCKSIZE_BYTES, Integer::valueOf, DEFAULT_BLOCKSIZE); } /** - * @param s Blocksize to use when writing out storefiles/hfiles on this - * column family. + * @param s Blocksize to use when writing out storefiles/hfiles on this column family. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { @@ -808,8 +812,8 @@ public ModifyableColumnFamilyDescriptor setBlocksize(int s) { } public ModifyableColumnFamilyDescriptor setBlocksize(String blocksize) throws HBaseException { - return setBlocksize(Integer.parseInt(PrettyPrinter. - valueOf(blocksize, PrettyPrinter.Unit.BYTE))); + return setBlocksize( + Integer.parseInt(PrettyPrinter.valueOf(blocksize, PrettyPrinter.Unit.BYTE))); } @Override @@ -819,12 +823,9 @@ public Compression.Algorithm getCompressionType() { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * LZO - * Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See LZO Compression for how + * to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ @@ -840,18 +841,17 @@ public DataBlockEncoding getDataBlockEncoding() { /** * Set data block encoding algorithm used in block cache. - * * @param type What kind of data block encoding will be used. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDataBlockEncoding(DataBlockEncoding type) { - return setValue(DATA_BLOCK_ENCODING_BYTES, type == null ? DataBlockEncoding.NONE.name() : type.name()); + return setValue(DATA_BLOCK_ENCODING_BYTES, + type == null ? DataBlockEncoding.NONE.name() : type.name()); } /** - * Set whether the tags should be compressed along with DataBlockEncoding. - * When no DataBlockEncoding is been used, this is having no effect. - * + * Set whether the tags should be compressed along with DataBlockEncoding. When no + * DataBlockEncoding is been used, this is having no effect. 
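The version bounds enforced by setMaxVersions and setVersions in the hunks above can be exercised through the builder. A minimal sketch, with an illustrative family name.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class VersionSettingsExample {
  public static void main(String[] args) {
    ColumnFamilyDescriptorBuilder builder =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMinVersions(1)
            .setMaxVersions(5); // must stay >= MIN_VERSIONS, otherwise IllegalArgumentException

    try {
      builder.setMaxVersions(0); // rejected: maximum versions must be positive
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}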
* @param compressTags * @return this (for chained invocation) */ @@ -861,8 +861,7 @@ public ModifyableColumnFamilyDescriptor setCompressTags(boolean compressTags) { @Override public boolean isCompressTags() { - return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, - DEFAULT_COMPRESS_TAGS); + return getStringOrDefault(COMPRESS_TAGS_BYTES, Boolean::valueOf, DEFAULT_COMPRESS_TAGS); } @Override @@ -884,27 +883,24 @@ public Compression.Algorithm getMinorCompactionCompressionType() { } /** - * Compression types supported in hbase. LZO is not bundled as part of the - * hbase distribution. See - * LZO - * Compression - * for how to enable it. - * + * Compression types supported in hbase. LZO is not bundled as part of the hbase distribution. + * See LZO Compression for how + * to enable it. * @param type Compression type setting. * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMajorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMajorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MAJOR_BYTES, type.name()); } - public ModifyableColumnFamilyDescriptor setMinorCompactionCompressionType( - Compression.Algorithm type) { + public ModifyableColumnFamilyDescriptor + setMinorCompactionCompressionType(Compression.Algorithm type) { return setValue(COMPRESSION_COMPACT_MINOR_BYTES, type.name()); } @@ -914,8 +910,8 @@ public boolean isInMemory() { } /** - * @param inMemory True if we are to favor keeping all values for this - * column family in the HRegionServer cache + * @param inMemory True if we are to favor keeping all values for this column family in the + * HRegionServer cache * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setInMemory(boolean inMemory) { @@ -929,23 +925,22 @@ public MemoryCompactionPolicy getInMemoryCompaction() { } /** - * @param inMemoryCompaction the prefered in-memory compaction policy for - * this column family + * @param inMemoryCompaction the prefered in-memory compaction policy for this column family * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { + public ModifyableColumnFamilyDescriptor + setInMemoryCompaction(MemoryCompactionPolicy inMemoryCompaction) { return setValue(IN_MEMORY_COMPACTION_BYTES, inMemoryCompaction.name()); } @Override public KeepDeletedCells getKeepDeletedCells() { - return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, - KeepDeletedCells::getValue, DEFAULT_KEEP_DELETED); + return getStringOrDefault(KEEP_DELETED_CELLS_BYTES, KeepDeletedCells::getValue, + DEFAULT_KEEP_DELETED); } /** - * @param keepDeletedCells True if deleted rows should not be collected - * immediately. + * @param keepDeletedCells True if deleted rows should not be collected immediately. * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells keepDeletedCells) { @@ -954,13 +949,13 @@ public ModifyableColumnFamilyDescriptor setKeepDeletedCells(KeepDeletedCells kee /** * By default, HBase only consider timestamp in versions. 
So a previous Delete with higher ts - * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. - * We will also consider mvcc in versions. See HBASE-15968 for details. + * will mask a later Put with lower ts. Set this to true to enable new semantics of versions. We + * will also consider mvcc in versions. See HBASE-15968 for details. */ @Override public boolean isNewVersionBehavior() { - return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, - Boolean::parseBoolean, DEFAULT_NEW_VERSION_BEHAVIOR); + return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES, Boolean::parseBoolean, + DEFAULT_NEW_VERSION_BEHAVIOR); } public ModifyableColumnFamilyDescriptor setNewVersionBehavior(boolean newVersionBehavior) { @@ -995,8 +990,7 @@ public int getMinVersions() { } /** - * @param minVersions The minimum number of versions to keep. (used when - * timeToLive is set) + * @param minVersions The minimum number of versions to keep. (used when timeToLive is set) * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { @@ -1004,17 +998,16 @@ public ModifyableColumnFamilyDescriptor setMinVersions(int minVersions) { } /** - * Retain all versions for a given TTL(retentionInterval), and then only a specific number - * of versions(versionAfterInterval) after that interval elapses. - * + * Retain all versions for a given TTL(retentionInterval), and then only a specific number of + * versions(versionAfterInterval) after that interval elapses. * @param retentionInterval Retain all versions for this interval * @param versionAfterInterval Retain no of versions to retain after retentionInterval * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive( - final int retentionInterval, final int versionAfterInterval) { + public ModifyableColumnFamilyDescriptor setVersionsWithTimeToLive(final int retentionInterval, + final int versionAfterInterval) { ModifyableColumnFamilyDescriptor modifyableColumnFamilyDescriptor = - setVersions(versionAfterInterval, Integer.MAX_VALUE); + setVersions(versionAfterInterval, Integer.MAX_VALUE); modifyableColumnFamilyDescriptor.setTimeToLive(retentionInterval); modifyableColumnFamilyDescriptor.setKeepDeletedCells(KeepDeletedCells.TTL); return modifyableColumnFamilyDescriptor; @@ -1026,8 +1019,8 @@ public boolean isBlockCacheEnabled() { } /** - * @param blockCacheEnabled True if hfile DATA type blocks should be cached - * (We always cache INDEX and BLOOM blocks; you cannot turn this off). + * @param blockCacheEnabled True if hfile DATA type blocks should be cached (We always cache + * INDEX and BLOOM blocks; you cannot turn this off). 
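setVersionsWithTimeToLive, reformatted above, combines three settings in one call: versions become (versionAfterInterval, Integer.MAX_VALUE), TTL is set to the retention interval, and KEEP_DELETED_CELLS is set to TTL. A hedged sketch of the resulting descriptor; the family name and intervals are illustrative.

import org.apache.hadoop.hbase.KeepDeletedCells;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class RetentionExample {
  public static void main(String[] args) {
    // Keep every version for one day, then fall back to at most 3 versions.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setVersionsWithTimeToLive(86400, 3)
        .setNewVersionBehavior(true) // also consider mvcc, not just timestamps (HBASE-15968)
        .build();
    System.out.println(cf.getKeepDeletedCells() == KeepDeletedCells.TTL); // true
  }
}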
* @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setBlockCacheEnabled(boolean blockCacheEnabled) { @@ -1046,7 +1039,8 @@ public ModifyableColumnFamilyDescriptor setBloomFilterType(final BloomType bt) { @Override public int getScope() { - return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, DEFAULT_REPLICATION_SCOPE); + return getStringOrDefault(REPLICATION_SCOPE_BYTES, Integer::valueOf, + DEFAULT_REPLICATION_SCOPE); } /** @@ -1059,7 +1053,8 @@ public ModifyableColumnFamilyDescriptor setScope(int scope) { @Override public boolean isCacheDataOnWrite() { - return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_DATA_ON_WRITE); + return getStringOrDefault(CACHE_DATA_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_DATA_ON_WRITE); } /** @@ -1072,7 +1067,8 @@ public ModifyableColumnFamilyDescriptor setCacheDataOnWrite(boolean value) { @Override public boolean isCacheIndexesOnWrite() { - return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_INDEX_ON_WRITE); + return getStringOrDefault(CACHE_INDEX_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_INDEX_ON_WRITE); } /** @@ -1085,7 +1081,8 @@ public ModifyableColumnFamilyDescriptor setCacheIndexesOnWrite(boolean value) { @Override public boolean isCacheBloomsOnWrite() { - return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, DEFAULT_CACHE_BLOOMS_ON_WRITE); + return getStringOrDefault(CACHE_BLOOMS_ON_WRITE_BYTES, Boolean::valueOf, + DEFAULT_CACHE_BLOOMS_ON_WRITE); } /** @@ -1098,12 +1095,12 @@ public ModifyableColumnFamilyDescriptor setCacheBloomsOnWrite(boolean value) { @Override public boolean isEvictBlocksOnClose() { - return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, DEFAULT_EVICT_BLOCKS_ON_CLOSE); + return getStringOrDefault(EVICT_BLOCKS_ON_CLOSE_BYTES, Boolean::valueOf, + DEFAULT_EVICT_BLOCKS_ON_CLOSE); } /** - * @param value true if we should evict cached blocks from the blockcache on - * close + * @param value true if we should evict cached blocks from the blockcache on close * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @@ -1112,12 +1109,12 @@ public ModifyableColumnFamilyDescriptor setEvictBlocksOnClose(boolean value) { @Override public boolean isPrefetchBlocksOnOpen() { - return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, DEFAULT_PREFETCH_BLOCKS_ON_OPEN); + return getStringOrDefault(PREFETCH_BLOCKS_ON_OPEN_BYTES, Boolean::valueOf, + DEFAULT_PREFETCH_BLOCKS_ON_OPEN); } /** - * @param value true if we should prefetch blocks into the blockcache on - * open + * @param value true if we should prefetch blocks into the blockcache on open * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setPrefetchBlocksOnOpen(boolean value) { @@ -1137,7 +1134,6 @@ public String toString() { return s.toString(); } - @Override public String toStringCustomizedValues() { StringBuilder s = new StringBuilder(); @@ -1164,9 +1160,8 @@ private StringBuilder getValues(boolean printDefaults) { } String key = Bytes.toString(entry.getKey().get()); String value = Bytes.toStringBinary(entry.getValue().get()); - if (printDefaults - || !DEFAULT_VALUES.containsKey(key) - || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { + if (printDefaults || !DEFAULT_VALUES.containsKey(key) + || !DEFAULT_VALUES.get(key).equalsIgnoreCase(value)) { s.append(", "); s.append(key); s.append(" => "); @@ -1210,7 
+1205,8 @@ private StringBuilder getValues(boolean printDefaults) { printCommaForConfiguration = true; s.append('\'').append(e.getKey()).append('\''); s.append(" => "); - s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))).append('\''); + s.append('\'').append(PrettyPrinter.format(e.getValue(), getUnit(e.getKey()))) + .append('\''); } s.append("}"); } @@ -1223,7 +1219,8 @@ public boolean equals(Object obj) { return true; } if (obj instanceof ModifyableColumnFamilyDescriptor) { - return ColumnFamilyDescriptor.COMPARATOR.compare(this, (ModifyableColumnFamilyDescriptor) obj) == 0; + return ColumnFamilyDescriptor.COMPARATOR.compare(this, + (ModifyableColumnFamilyDescriptor) obj) == 0; } return false; } @@ -1247,19 +1244,18 @@ public int compareTo(ModifyableColumnFamilyDescriptor other) { * @see #parseFrom(byte[]) */ private byte[] toByteArray() { - return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this) - .toByteArray()); + return ProtobufUtil.prependPBMagic(ProtobufUtil.toColumnFamilySchema(this).toByteArray()); } /** - * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb - * magic prefix - * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from - * bytes + * @param bytes A pb serialized {@link ModifyableColumnFamilyDescriptor} instance with pb magic + * prefix + * @return An instance of {@link ModifyableColumnFamilyDescriptor} made from bytes * @throws DeserializationException * @see #toByteArray() */ - private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) throws DeserializationException { + private static ColumnFamilyDescriptor parseFrom(final byte[] bytes) + throws DeserializationException { if (!ProtobufUtil.isPBMagicPrefix(bytes)) { throw new DeserializationException("No magic"); } @@ -1288,9 +1284,7 @@ public Map getConfiguration() { /** * Setter for storing a configuration setting in {@link #configuration} map. - * - * @param key Config key. Same as XML config key e.g. - * hbase.something.or.other. + * @param key Config key. Same as XML config key e.g. hbase.something.or.other. * @param value String value. If null, removes the configuration. * @return this (for chained invocation) */ @@ -1304,9 +1298,7 @@ public ModifyableColumnFamilyDescriptor setConfiguration(String key, String valu } /** - * Remove a configuration setting represented by the key from the - * {@link #configuration} map. - * + * Remove a configuration setting represented by the key from the {@link #configuration} map. * @param key * @return this (for chained invocation) */ @@ -1321,7 +1313,6 @@ public String getEncryptionType() { /** * Set the encryption algorithm for use with this family - * * @param algorithm * @return this (for chained invocation) */ @@ -1336,7 +1327,6 @@ public byte[] getEncryptionKey() { /** * Set the raw crypto key attribute for the family - * * @param keyBytes * @return this (for chained invocation) */ @@ -1351,7 +1341,6 @@ public long getMobThreshold() { /** * Sets the mob threshold of the family. - * * @param threshold The mob threshold. * @return this (for chained invocation) */ @@ -1366,7 +1355,6 @@ public boolean isMobEnabled() { /** * Enables the mob for the family. - * * @param isMobEnabled Whether to enable the mob for the family. * @return this (for chained invocation) */ @@ -1383,32 +1371,30 @@ public MobCompactPartitionPolicy getMobCompactPartitionPolicy() { /** * Set the mob compact partition policy for the family. 
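The MOB-related setters touched above (threshold, enablement, compact partition policy) are normally reached through the builder. A minimal sketch with an illustrative threshold and policy.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.MobCompactPartitionPolicy;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilyExample {
  public static void main(String[] args) {
    // Values above 128KB are written as MOB files; mob compactions are grouped weekly.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("cf"))
        .setMobEnabled(true)
        .setMobThreshold(128 * 1024L)
        .setMobCompactPartitionPolicy(MobCompactPartitionPolicy.WEEKLY)
        .build();
    System.out.println(cf.isMobEnabled());
  }
}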
- * * @param policy policy type * @return this (for chained invocation) */ - public ModifyableColumnFamilyDescriptor setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { + public ModifyableColumnFamilyDescriptor + setMobCompactPartitionPolicy(MobCompactPartitionPolicy policy) { return setValue(MOB_COMPACT_PARTITION_POLICY_BYTES, policy.name()); } @Override public short getDFSReplication() { - return getStringOrDefault(DFS_REPLICATION_BYTES, - Short::valueOf, DEFAULT_DFS_REPLICATION); + return getStringOrDefault(DFS_REPLICATION_BYTES, Short::valueOf, DEFAULT_DFS_REPLICATION); } /** * Set the replication factor to hfile(s) belonging to this family - * - * @param replication number of replicas the blocks(s) belonging to this CF - * should have, or {@link #DEFAULT_DFS_REPLICATION} for the default - * replication factor set in the filesystem + * @param replication number of replicas the blocks(s) belonging to this CF should have, or + * {@link #DEFAULT_DFS_REPLICATION} for the default replication factor set in the + * filesystem * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setDFSReplication(short replication) { if (replication < 1 && replication != DEFAULT_DFS_REPLICATION) { throw new IllegalArgumentException( - "DFS replication factor cannot be less than 1 if explicitly set."); + "DFS replication factor cannot be less than 1 if explicitly set."); } return setValue(DFS_REPLICATION_BYTES, Short.toString(replication)); } @@ -1420,11 +1406,8 @@ public String getStoragePolicy() { /** * Set the storage policy for use with this family - * - * @param policy the policy to set, valid setting includes: - * "LAZY_PERSIST", - * "ALL_SSD", "ONE_SSD", "HOT", "WARM", - * "COLD" + * @param policy the policy to set, valid setting includes: "LAZY_PERSIST", + * "ALL_SSD", "ONE_SSD", "HOT", "WARM", "COLD" * @return this (for chained invocation) */ public ModifyableColumnFamilyDescriptor setStoragePolicy(String policy) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java index 018cfef02605..25b45f6c0d51 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +16,18 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; /** - * Currently, there are only two compact types: - * {@code NORMAL} means do store files compaction; + * Currently, there are only two compact types: {@code NORMAL} means do store files compaction; * {@code MOB} means do mob files compaction. 
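CompactType, whose javadoc is reflowed in the hunk that follows, is consumed by Admin's compaction calls. A hedged sketch, assuming a reachable cluster and an existing mob-enabled table named "demo_table"; both are assumptions, not part of the patch.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactType;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MobCompactionExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // NORMAL compacts store files; MOB compacts the mob files of a mob-enabled family.
      admin.compact(TableName.valueOf("demo_table"), CompactType.MOB);
    }
  }
}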
- * */ + */ @InterfaceAudience.Public public enum CompactType { - NORMAL (0), - MOB (1); + NORMAL(0), MOB(1); - CompactType(int value) {} + CompactType(int value) { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java index 51f7d071e4ac..b70dce458441 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompactionState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java index 08afeb61b558..592a99b0584d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CompleteScanResultCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +21,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * A scan result cache that only returns complete result. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java index 3ea13a44ed58..e281a8812712 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Connection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,23 +28,22 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * A cluster connection encapsulating lower level individual connections to actual servers and - * a connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} - * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} - * the connection to release the resources. - * - *

    The connection object contains logic to find the master, locate regions out on the cluster, - * keeps a cache of locations and then knows how to re-calibrate after they move. The individual - * connections to servers, meta cache, zookeeper connection, etc are all shared by the - * {@link Table} and {@link Admin} instances obtained from this connection. - * - *

    Connection creation is a heavy-weight operation. Connection implementations are thread-safe, - * so that the client can create a connection once, and share it with different threads. - * {@link Table} and {@link Admin} instances, on the other hand, are light-weight and are not - * thread-safe. Typically, a single connection per client application is instantiated and every - * thread will obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} - * is not recommended. - * + * A cluster connection encapsulating lower level individual connections to actual servers and a + * connection to zookeeper. Connections are instantiated through the {@link ConnectionFactory} + * class. The lifecycle of the connection is managed by the caller, who has to {@link #close()} the + * connection to release the resources. + *

    + * The connection object contains logic to find the master, locate regions out on the cluster, keeps + * a cache of locations and then knows how to re-calibrate after they move. The individual + * connections to servers, meta cache, zookeeper connection, etc are all shared by the {@link Table} + * and {@link Admin} instances obtained from this connection. + *

    + * Connection creation is a heavy-weight operation. Connection implementations are thread-safe, so + * that the client can create a connection once, and share it with different threads. {@link Table} + * and {@link Admin} instances, on the other hand, are light-weight and are not thread-safe. + * Typically, a single connection per client application is instantiated and every thread will + * obtain its own Table instance. Caching or pooling of {@link Table} and {@link Admin} is not + * recommended. * @see ConnectionFactory * @since 0.99.0 */ @@ -52,13 +51,11 @@ public interface Connection extends Abortable, Closeable { /* - * Implementation notes: - * - Only allow new style of interfaces: - * -- All table names are passed as TableName. No more byte[] and string arguments - * -- Most of the classes with names H is deprecated in favor of non-H versions - * (Table, Connection, etc) - * -- Only real client-facing public methods are allowed - * - Connection should contain only getTable(), getAdmin() kind of general methods. + * Implementation notes: - Only allow new style of interfaces: -- All table names are passed as + * TableName. No more byte[] and string arguments -- Most of the classes with names H is + * deprecated in favor of non-H versions (Table, Connection, etc) -- Only real client-facing + * public methods are allowed - Connection should contain only getTable(), getAdmin() kind of + * general methods. */ /** @@ -67,17 +64,14 @@ public interface Connection extends Abortable, Closeable { Configuration getConfiguration(); /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *
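The lifecycle rules in the reflowed Connection javadoc (one shared heavy-weight Connection, per-thread light-weight Table closed by the caller) look roughly like this in client code. The table and row names are illustrative and a running cluster is assumed.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionLifecycleExample {
  public static void main(String[] args) throws Exception {
    // One heavy-weight Connection for the whole application...
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // ...and a light-weight, non-thread-safe Table per using thread, closed by the caller.
      try (Table table = connection.getTable(TableName.valueOf("demo_table"))) {
        Result result = table.get(new Get(Bytes.toBytes("row-1")));
        System.out.println(result.isEmpty());
      }
    }
  }
}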

    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *

    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table * @return a Table to use for interactions with this table */ @@ -86,18 +80,14 @@ default Table getTable(TableName tableName) throws IOException { } /** - * Retrieve a Table implementation for accessing a table. - * The returned Table is not thread safe, a new instance should be created for each using thread. - * This is a lightweight operation, pooling or caching of the returned Table - * is neither required nor desired. + * Retrieve a Table implementation for accessing a table. The returned Table is not thread safe, a + * new instance should be created for each using thread. This is a lightweight operation, pooling + * or caching of the returned Table is neither required nor desired. *

    - * The caller is responsible for calling {@link Table#close()} on the returned - * table instance. + * The caller is responsible for calling {@link Table#close()} on the returned table instance. *

    - * Since 0.98.1 this method no longer checks table existence. An exception - * will be thrown if the table does not exist only when the first operation is - * attempted. - * + * Since 0.98.1 this method no longer checks table existence. An exception will be thrown if the + * table does not exist only when the first operation is attempted. * @param tableName the name of the table * @param pool The thread pool to use for batch operations, null to use a default pool. * @return a Table to use for interactions with this table @@ -109,18 +99,16 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep /** *

    * Retrieve a {@link BufferedMutator} for performing client-side buffering of writes. The - * {@link BufferedMutator} returned by this method is thread-safe. - * This accessor will create a new ThreadPoolExecutor and will be shutdown once we close the - * BufferedMutator. This object can be used for long lived operations. + * {@link BufferedMutator} returned by this method is thread-safe. This accessor will create a new + * ThreadPoolExecutor and will be shutdown once we close the BufferedMutator. This object can be + * used for long lived operations. *

    *

    - * The caller is responsible for calling {@link BufferedMutator#close()} on - * the returned {@link BufferedMutator} instance. + * The caller is responsible for calling {@link BufferedMutator#close()} on the returned + * {@link BufferedMutator} instance. *

    *

    - * * @param tableName the name of the table - * * @return a {@link BufferedMutator} for the supplied tableName. */ BufferedMutator getBufferedMutator(TableName tableName) throws IOException; @@ -133,7 +121,6 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep * responsibility to shutdown. For ThreadPool created by us, we will shutdown when user calls * {@link BufferedMutator#close()}. The caller is responsible for calling * {@link BufferedMutator#close()} on the returned {@link BufferedMutator} instance. - * * @param params details on how to instantiate the {@code BufferedMutator}. * @return a {@link BufferedMutator} for the supplied tableName. */ @@ -142,15 +129,10 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep /** * Retrieve a RegionLocator implementation to inspect region information on a table. The returned * RegionLocator is not thread-safe, so a new instance should be created for each using thread. - * - * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither - * required nor desired. - *
    + * This is a lightweight operation. Pooling or caching of the returned RegionLocator is neither + * required nor desired.
    * The caller is responsible for calling {@link RegionLocator#close()} on the returned - * RegionLocator instance. - * - * RegionLocator needs to be unmanaged - * + * RegionLocator instance. RegionLocator needs to be unmanaged * @param tableName Name of the table who's region is to be examined * @return A RegionLocator instance */ @@ -167,14 +149,10 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep void clearRegionLocationCache(); /** - * Retrieve an Admin implementation to administer an HBase cluster. - * The returned Admin is not guaranteed to be thread-safe. A new instance should be created for - * each using thread. This is a lightweight operation. Pooling or caching of the returned - * Admin is not recommended. - *
    - * The caller is responsible for calling {@link Admin#close()} on the returned - * Admin instance. - * + * Retrieve an Admin implementation to administer an HBase cluster. The returned Admin is not + * guaranteed to be thread-safe. A new instance should be created for each using thread. This is a + * lightweight operation. Pooling or caching of the returned Admin is not recommended.
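Following the same pattern for Admin described above, a minimal sketch that obtains an Admin from the shared Connection and closes it when done; the table and family names are illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class AdminUsageExample {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) { // not cached or pooled; closed by the caller
      TableName name = TableName.valueOf("demo_table");
      if (!admin.tableExists(name)) {
        admin.createTable(TableDescriptorBuilder.newBuilder(name)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .build());
      }
    }
  }
}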
    + * The caller is responsible for calling {@link Admin#close()} on the returned Admin instance. * @return an Admin instance for cluster administration */ Admin getAdmin() throws IOException; @@ -196,15 +174,11 @@ default Table getTable(TableName tableName, ExecutorService pool) throws IOExcep TableBuilder getTableBuilder(TableName tableName, ExecutorService pool); /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    * This will be used mostly by hbck tool. - * * @return an Hbck instance for active master. Active master is fetched from the zookeeper. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) @@ -213,18 +187,13 @@ default Hbck getHbck() throws IOException { } /** - * Retrieve an Hbck implementation to fix an HBase cluster. - * The returned Hbck is not guaranteed to be thread-safe. A new instance should be created by - * each thread. This is a lightweight operation. Pooling or caching of the returned Hbck instance - * is not recommended. - *
    - * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance. - *
    - * This will be used mostly by hbck tool. This may only be used to by pass getting - * registered master from ZK. In situations where ZK is not available or active master is not - * registered with ZK and user can get master address by other means, master can be explicitly - * specified. - * + * Retrieve an Hbck implementation to fix an HBase cluster. The returned Hbck is not guaranteed to + * be thread-safe. A new instance should be created by each thread. This is a lightweight + * operation. Pooling or caching of the returned Hbck instance is not recommended.
    + * The caller is responsible for calling {@link Hbck#close()} on the returned Hbck instance.
    + * This will be used mostly by hbck tool. This may only be used to by pass getting registered + * master from ZK. In situations where ZK is not available or active master is not registered with + * ZK and user can get master address by other means, master can be explicitly specified. * @param masterServer explicit {@link ServerName} for master server * @return an Hbck instance for a specified master server */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java index 19a398b8c66d..dab71ece000b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionConfiguration.java @@ -1,14 +1,20 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more contributor license - * agreements. See the NOTICE file distributed with this work for additional information regarding - * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. You may obtain a - * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable - * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" - * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License - * for the specific language governing permissions and limitations under the License. +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.conf.Configuration; @@ -16,12 +22,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Configuration parameters for the connection. - * Configuration is a heavy weight registry that does a lot of string operations and regex matching. - * Method calls into Configuration account for high CPU usage and have huge performance impact. - * This class caches connection-related configuration values in the ConnectionConfiguration - * object so that expensive conf.getXXX() calls are avoided every time HTable, etc is instantiated. - * see HBASE-12128 + * Configuration parameters for the connection. Configuration is a heavy weight registry that does a + * lot of string operations and regex matching. Method calls into Configuration account for high CPU + * usage and have huge performance impact. This class caches connection-related configuration values + * in the ConnectionConfiguration object so that expensive conf.getXXX() calls are avoided every + * time HTable, etc is instantiated. 
see HBASE-12128 */ @InterfaceAudience.Private public class ConnectionConfiguration { @@ -29,18 +34,18 @@ public class ConnectionConfiguration { public static final String WRITE_BUFFER_SIZE_KEY = "hbase.client.write.buffer"; public static final long WRITE_BUFFER_SIZE_DEFAULT = 2097152; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS = - "hbase.client.write.buffer.periodicflush.timeout.ms"; + "hbase.client.write.buffer.periodicflush.timeout.ms"; public static final String WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS = - "hbase.client.write.buffer.periodicflush.timertick.ms"; + "hbase.client.write.buffer.periodicflush.timertick.ms"; public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT = 0; // 0 == Disabled public static final long WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT = 1000L; // 1 second public static final String MAX_KEYVALUE_SIZE_KEY = "hbase.client.keyvalue.maxsize"; public static final int MAX_KEYVALUE_SIZE_DEFAULT = 10485760; public static final String PRIMARY_CALL_TIMEOUT_MICROSECOND = - "hbase.client.primaryCallTimeout.get"; + "hbase.client.primaryCallTimeout.get"; public static final int PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT = 10000; // 10ms public static final String PRIMARY_SCAN_TIMEOUT_MICROSECOND = - "hbase.client.replicaCallTimeout.scan"; + "hbase.client.replicaCallTimeout.scan"; public static final int PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT = 1000000; // 1s private final long writeBufferSize; @@ -68,42 +73,39 @@ public class ConnectionConfiguration { ConnectionConfiguration(Configuration conf) { this.writeBufferSize = conf.getLong(WRITE_BUFFER_SIZE_KEY, WRITE_BUFFER_SIZE_DEFAULT); - this.writeBufferPeriodicFlushTimeoutMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); + this.writeBufferPeriodicFlushTimeoutMs = conf.getLong(WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS, + WRITE_BUFFER_PERIODIC_FLUSH_TIMEOUT_MS_DEFAULT); this.writeBufferPeriodicFlushTimerTickMs = conf.getLong( - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, - WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); + WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS_DEFAULT); this.metaOperationTimeout = conf.getInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.operationTimeout = conf.getInt( - HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.scannerCaching = conf.getInt( - HConstants.HBASE_CLIENT_SCANNER_CACHING, HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); + this.scannerCaching = conf.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING); - this.scannerMaxResultSize = - conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, - HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); + this.scannerMaxResultSize = conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY, + HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE); this.primaryCallTimeoutMicroSecond = - conf.getInt(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT); + conf.getInt(PRIMARY_CALL_TIMEOUT_MICROSECOND, PRIMARY_CALL_TIMEOUT_MICROSECOND_DEFAULT); this.replicaCallTimeoutMicroSecondScan = - 
conf.getInt(PRIMARY_SCAN_TIMEOUT_MICROSECOND, PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT); + conf.getInt(PRIMARY_SCAN_TIMEOUT_MICROSECOND, PRIMARY_SCAN_TIMEOUT_MICROSECOND_DEFAULT); this.metaReplicaCallTimeoutMicroSecondScan = - conf.getInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, - HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT); + conf.getInt(HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT, + HConstants.HBASE_CLIENT_META_REPLICA_SCAN_TIMEOUT_DEFAULT); - this.retries = conf.getInt( - HConstants.HBASE_CLIENT_RETRIES_NUMBER, HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + this.retries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); - this.clientScannerAsyncPrefetch = conf.getBoolean( - Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); + this.clientScannerAsyncPrefetch = conf.getBoolean(Scan.HBASE_CLIENT_SCANNER_ASYNC_PREFETCH, + Scan.DEFAULT_HBASE_CLIENT_SCANNER_ASYNC_PREFETCH); this.maxKeyValueSize = conf.getInt(MAX_KEYVALUE_SIZE_KEY, MAX_KEYVALUE_SIZE_DEFAULT); @@ -111,16 +113,15 @@ public class ConnectionConfiguration { conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.readRpcTimeout = conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); } /** - * Constructor - * This is for internal testing purpose (using the default value). - * In real usage, we should read the configuration from the Configuration object. + * Constructor This is for internal testing purpose (using the default value). In real usage, we + * should read the configuration from the Configuration object. */ protected ConnectionConfiguration() { this.writeBufferSize = WRITE_BUFFER_SIZE_DEFAULT; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index 7980532727c6..3bec4f1785d2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -54,13 +53,15 @@ * Since 2.2.0, Connection created by ConnectionFactory can contain user-specified kerberos * credentials if caller has following two configurations set: *

- * <li>hbase.client.keytab.file, points to a valid keytab on the local filesystem
- * <li>hbase.client.kerberos.principal, gives the Kerberos principal to use
+ * <li>hbase.client.keytab.file, points to a valid keytab on the local filesystem
+ * <li>hbase.client.kerberos.principal, gives the Kerberos principal to use
 * </ul>
 * By this way, caller can directly connect to kerberized cluster without caring login and
 * credentials renewal logic in application.
+ *
 * <p>
 * </p>
    + * * Similarly, {@link Connection} also returns {@link Admin} and {@link RegionLocator} * implementations. * @see Connection @@ -69,7 +70,8 @@ @InterfaceAudience.Public public class ConnectionFactory { - public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = "hbase.client.async.connection.impl"; + public static final String HBASE_CLIENT_ASYNC_CONNECTION_IMPL = + "hbase.client.async.connection.impl"; /** No public c.tors */ protected ConnectionFactory() { @@ -212,7 +214,7 @@ public static Connection createConnection(Configuration conf, User user) throws * @return Connection object for conf */ public static Connection createConnection(Configuration conf, ExecutorService pool, - final User user) throws IOException { + final User user) throws IOException { return TraceUtil.trace(() -> { String className = conf.get(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, ConnectionImplementation.class.getName()); @@ -225,10 +227,10 @@ public static Connection createConnection(Configuration conf, ExecutorService po try { // Default HCM#HCI is not accessible; make it so before invoking. Constructor constructor = - clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); + clazz.getDeclaredConstructor(Configuration.class, ExecutorService.class, User.class); constructor.setAccessible(true); return user.runAs((PrivilegedExceptionAction) () -> (Connection) constructor - .newInstance(conf, pool, user)); + .newInstance(conf, pool, user)); } catch (Exception e) { throw new IOException(e); } @@ -299,9 +301,9 @@ public static CompletableFuture createAsyncConnection(Configura Class clazz = conf.getClass(HBASE_CLIENT_ASYNC_CONNECTION_IMPL, AsyncConnectionImpl.class, AsyncConnection.class); try { - future.complete(user.runAs( - (PrivilegedExceptionAction) () -> ReflectionUtils - .newInstance(clazz, conf, registry, clusterId, user))); + future.complete( + user.runAs((PrivilegedExceptionAction) () -> ReflectionUtils + .newInstance(clazz, conf, registry, clusterId, user))); } catch (Exception e) { registry.close(); future.completeExceptionally(e); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java index 13715d9471ac..9956091a53a8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionImplementation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -163,12 +162,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigResponse; /** - * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces. - * Encapsulates connection to zookeeper and regionservers. + * Main implementation of {@link Connection} and {@link ClusterConnection} interfaces. Encapsulates + * connection to zookeeper and regionservers. 
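The ConnectionFactory javadoc reformatted in the hunk above documents the two kerberos client settings. As a rough, non-normative sketch of how a caller might use them with the public client API (the keytab path, principal, and table name below are placeholders, not values taken from this patch):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Table;

public class KerberosClientSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // The two keys described in the ConnectionFactory javadoc; the values here are
    // illustrative placeholders only.
    conf.set("hbase.client.keytab.file", "/etc/security/keytabs/app.keytab");
    conf.set("hbase.client.kerberos.principal", "app/_HOST@EXAMPLE.COM");
    // Per the javadoc, the connection handles login and credential renewal internally,
    // so the application does not need its own kerberos login logic.
    try (Connection connection = ConnectionFactory.createConnection(conf);
      Table table = connection.getTable(TableName.valueOf("table1"))) {
      // Use the lightweight, per-thread Table here; the Connection is the heavyweight,
      // shareable object.
    }
  }
}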
*/ @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", - justification="Access to the conncurrent hash map is under a lock so should be fine.") + value = "AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION", + justification = "Access to the conncurrent hash map is under a lock so should be fine.") @InterfaceAudience.Private public class ConnectionImplementation implements ClusterConnection, Closeable { public static final String RETRIES_BY_SERVER_KEY = "hbase.client.retries.by.server"; @@ -186,8 +185,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { final int rpcTimeout; /** - * Global nonceGenerator shared per client.Currently there's no reason to limit its scope. - * Once it's set under nonceGeneratorCreateLock, it is never unset or changed. + * Global nonceGenerator shared per client.Currently there's no reason to limit its scope. Once + * it's set under nonceGeneratorCreateLock, it is never unset or changed. */ private static volatile NonceGenerator nonceGenerator = null; /** The nonce generator lock. Only taken when creating Connection, which gets a private copy. */ @@ -243,8 +242,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { private final ClientBackoffPolicy backoffPolicy; /** - * Allow setting an alternate BufferedMutator implementation via - * config. If null, use default. + * Allow setting an alternate BufferedMutator implementation via config. If null, use default. */ private final String alternateBufferedMutatorClassName; @@ -265,7 +263,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { * Constructor, for creating cluster connection with provided ConnectionRegistry. */ ConnectionImplementation(Configuration conf, ExecutorService pool, User user, - ConnectionRegistry registry) throws IOException { + ConnectionRegistry registry) throws IOException { this.conf = conf; this.user = user; if (user != null && user.isLoginFromKeytab()) { @@ -274,8 +272,7 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { this.batchPool = (ThreadPoolExecutor) pool; this.connectionConfig = new ConnectionConfiguration(conf); this.closed = false; - this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); long configuredPauseForCQTBE = conf.getLong(HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE, pause); if (configuredPauseForCQTBE < pause) { LOG.warn("The " + HConstants.HBASE_CLIENT_PAUSE_FOR_CQTBE + " setting: " @@ -290,9 +287,8 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { // how many times to try, one more than max *retry* time this.numTries = retries2Attempts(connectionConfig.getRetriesNumber()); - this.rpcTimeout = conf.getInt( - HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.rpcTimeout = + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); if (conf.getBoolean(NonceGenerator.CLIENT_NONCES_ENABLED_KEY, true)) { synchronized (nonceGeneratorCreateLock) { if (nonceGenerator == null) { @@ -311,22 +307,20 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { this.asyncProcess = new AsyncProcess(this, conf, rpcCallerFactory, rpcControllerFactory); if (conf.getBoolean(CLIENT_SIDE_METRICS_ENABLED_KEY, false)) { this.metrics = - new 
MetricsConnection(this.toString(), this::getBatchPool, this::getMetaLookupPool); + new MetricsConnection(this.toString(), this::getBatchPool, this::getMetaLookupPool); } else { this.metrics = null; } this.metaCache = new MetaCache(this.metrics); - boolean shouldListen = conf.getBoolean(HConstants.STATUS_PUBLISHED, - HConstants.STATUS_PUBLISHED_DEFAULT); - Class listenerClass = - conf.getClass(ClusterStatusListener.STATUS_LISTENER_CLASS, - ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, - ClusterStatusListener.Listener.class); + boolean shouldListen = + conf.getBoolean(HConstants.STATUS_PUBLISHED, HConstants.STATUS_PUBLISHED_DEFAULT); + Class listenerClass = conf.getClass( + ClusterStatusListener.STATUS_LISTENER_CLASS, + ClusterStatusListener.DEFAULT_STATUS_LISTENER_CLASS, ClusterStatusListener.Listener.class); // Is there an alternate BufferedMutator to use? - this.alternateBufferedMutatorClassName = - this.conf.get(BufferedMutator.CLASSNAME_KEY); + this.alternateBufferedMutatorClassName = this.conf.get(BufferedMutator.CLASSNAME_KEY); try { if (registry == null) { @@ -341,11 +335,11 @@ public class ConnectionImplementation implements ClusterConnection, Closeable { // Do we publish the status? if (shouldListen) { if (listenerClass == null) { - LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + - ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status"); + LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " + + ClusterStatusListener.STATUS_LISTENER_CLASS + " is not set - not listening status"); } else { - clusterStatusListener = new ClusterStatusListener( - new ClusterStatusListener.DeadServerHandler() { + clusterStatusListener = + new ClusterStatusListener(new ClusterStatusListener.DeadServerHandler() { @Override public void newDead(ServerName sn) { clearCaches(sn); @@ -362,33 +356,32 @@ public void newDead(ServerName sn) { } // Get the region locator's meta replica mode. 
- this.metaReplicaMode = CatalogReplicaMode.fromString(conf.get(LOCATOR_META_REPLICAS_MODE, - CatalogReplicaMode.NONE.toString())); + this.metaReplicaMode = CatalogReplicaMode + .fromString(conf.get(LOCATOR_META_REPLICAS_MODE, CatalogReplicaMode.NONE.toString())); switch (this.metaReplicaMode) { case LOAD_BALANCE: - String replicaSelectorClass = conf.get( - RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, - CatalogReplicaLoadBalanceSimpleSelector.class.getName()); - - this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory.createSelector( - replicaSelectorClass, META_TABLE_NAME, getChoreService(), () -> { - int numOfReplicas = 1; - try { - RegionLocations metaLocations = this.registry.getMetaRegionLocations().get( - connectionConfig.getReadRpcTimeout(), TimeUnit.MILLISECONDS); - numOfReplicas = metaLocations.size(); - } catch (Exception e) { - LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); - } - return numOfReplicas; - }); + String replicaSelectorClass = + conf.get(RegionLocator.LOCATOR_META_REPLICAS_MODE_LOADBALANCE_SELECTOR, + CatalogReplicaLoadBalanceSimpleSelector.class.getName()); + + this.metaReplicaSelector = CatalogReplicaLoadBalanceSelectorFactory + .createSelector(replicaSelectorClass, META_TABLE_NAME, getChoreService(), () -> { + int numOfReplicas = 1; + try { + RegionLocations metaLocations = this.registry.getMetaRegionLocations() + .get(connectionConfig.getReadRpcTimeout(), TimeUnit.MILLISECONDS); + numOfReplicas = metaLocations.size(); + } catch (Exception e) { + LOG.error("Failed to get table {}'s region replication, ", META_TABLE_NAME, e); + } + return numOfReplicas; + }); break; case NONE: // If user does not configure LOCATOR_META_REPLICAS_MODE, let's check the legacy config. - boolean useMetaReplicas = conf.getBoolean(USE_META_REPLICAS, - DEFAULT_USE_META_REPLICAS); + boolean useMetaReplicas = conf.getBoolean(USE_META_REPLICAS, DEFAULT_USE_META_REPLICAS); if (useMetaReplicas) { this.metaReplicaMode = CatalogReplicaMode.HEDGED_READ; } @@ -408,12 +401,10 @@ private void spawnRenewalChore(final UserGroupInformation user) { * @param cnm Replaces the nonce generator used, for testing. * @return old nonce generator. 
*/ - static NonceGenerator injectNonceGeneratorForTesting( - ClusterConnection conn, NonceGenerator cnm) { - ConnectionImplementation connImpl = (ConnectionImplementation)conn; + static NonceGenerator injectNonceGeneratorForTesting(ClusterConnection conn, NonceGenerator cnm) { + ConnectionImplementation connImpl = (ConnectionImplementation) conn; NonceGenerator ng = connImpl.getNonceGenerator(); - LOG.warn("Nonce generator is being replaced by test code for " - + cnm.getClass().getName()); + LOG.warn("Nonce generator is being replaced by test code for " + cnm.getClass().getName()); nonceGenerator = cnm; return ng; } @@ -445,11 +436,11 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) { } if (params.getWriteBufferPeriodicFlushTimeoutMs() == BufferedMutatorParams.UNSET) { params.setWriteBufferPeriodicFlushTimeoutMs( - connectionConfig.getWriteBufferPeriodicFlushTimeoutMs()); + connectionConfig.getWriteBufferPeriodicFlushTimeoutMs()); } if (params.getWriteBufferPeriodicFlushTimerTickMs() == BufferedMutatorParams.UNSET) { params.setWriteBufferPeriodicFlushTimerTickMs( - connectionConfig.getWriteBufferPeriodicFlushTimerTickMs()); + connectionConfig.getWriteBufferPeriodicFlushTimerTickMs()); } if (params.getMaxKeyValueSize() == BufferedMutatorParams.UNSET) { params.maxKeyValueSize(connectionConfig.getMaxKeyValueSize()); @@ -464,8 +455,8 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) { return new BufferedMutatorImpl(this, rpcCallerFactory, rpcControllerFactory, params); } try { - return (BufferedMutator)ReflectionUtils.newInstance(Class.forName(implementationClassName), - this, rpcCallerFactory, rpcControllerFactory, params); + return (BufferedMutator) ReflectionUtils.newInstance(Class.forName(implementationClassName), + this, rpcCallerFactory, rpcControllerFactory, params); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } @@ -500,16 +491,16 @@ public Hbck getHbck(ServerName masterServer) throws IOException { throw new RegionServerStoppedException(masterServer + " is dead."); } String key = - getStubKey(MasterProtos.HbckService.BlockingInterface.class.getName(), masterServer); + getStubKey(MasterProtos.HbckService.BlockingInterface.class.getName(), masterServer); return new HBaseHbck( - (MasterProtos.HbckService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { - BlockingRpcChannel channel = - this.rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout); - return MasterProtos.HbckService.newBlockingStub(channel); - }), rpcControllerFactory); + (MasterProtos.HbckService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { + BlockingRpcChannel channel = + this.rpcClient.createBlockingRpcChannel(masterServer, user, rpcTimeout); + return MasterProtos.HbckService.newBlockingStub(channel); + }), rpcControllerFactory); }, () -> TraceUtil.createSpan(this.getClass().getSimpleName() + ".getHbck") - .setAttribute(HBaseSemanticAttributes.SERVER_NAME_KEY, masterServer.getServerName())); + .setAttribute(HBaseSemanticAttributes.SERVER_NAME_KEY, masterServer.getServerName())); } @Override @@ -552,16 +543,14 @@ private ThreadPoolExecutor getThreadPool(int maxThreads, int coreThreads, String long keepAliveTime = conf.getLong("hbase.hconnection.threads.keepalivetime", 60); BlockingQueue workQueue = passedWorkQueue; if (workQueue == null) { - workQueue = - new LinkedBlockingQueue<>(maxThreads * - conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, - HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); + workQueue = new 
LinkedBlockingQueue<>(maxThreads * conf.getInt( + HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS, HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS)); coreThreads = maxThreads; } ThreadPoolExecutor tpe = - new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, - new ThreadFactoryBuilder().setNameFormat(toString() + nameHint + "-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadPoolExecutor(coreThreads, maxThreads, keepAliveTime, TimeUnit.SECONDS, workQueue, + new ThreadFactoryBuilder().setNameFormat(toString() + nameHint + "-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); tpe.allowCoreThreadTimeOut(true); return tpe; } @@ -570,15 +559,13 @@ private ThreadPoolExecutor getMetaLookupPool() { if (this.metaLookupPool == null) { synchronized (this) { if (this.metaLookupPool == null) { - //Some of the threads would be used for meta replicas - //To start with, threads.max.core threads can hit the meta (including replicas). - //After that, requests will get queued up in the passed queue, and only after - //the queue is full, a new thread will be started + // Some of the threads would be used for meta replicas + // To start with, threads.max.core threads can hit the meta (including replicas). + // After that, requests will get queued up in the passed queue, and only after + // the queue is full, a new thread will be started int threads = conf.getInt("hbase.hconnection.meta.lookup.threads.max", 128); - this.metaLookupPool = getThreadPool( - threads, - threads, - "-metaLookup-shared-", new LinkedBlockingQueue<>()); + this.metaLookupPool = + getThreadPool(threads, threads, "-metaLookup-shared-", new LinkedBlockingQueue<>()); } } } @@ -624,7 +611,7 @@ RpcClient getRpcClient() { * An identifier that will remain the same for a given connection. */ @Override - public String toString(){ + public String toString() { return "hconnection-0x" + Integer.toHexString(hashCode()); } @@ -668,9 +655,9 @@ private void checkClosed() throws LocalConnectionClosedException { } /** - * Like {@link ConnectionClosedException} but thrown from the checkClosed call which looks - * at the local this.closed flag. We use this rather than {@link ConnectionClosedException} - * because the latter does not inherit from DoNotRetryIOE (it should. TODO). + * Like {@link ConnectionClosedException} but thrown from the checkClosed call which looks at the + * local this.closed flag. We use this rather than {@link ConnectionClosedException} because the + * latter does not inherit from DoNotRetryIOE (it should. TODO). */ private static class LocalConnectionClosedException extends DoNotRetryIOException { LocalConnectionClosedException(String message) { @@ -705,7 +692,6 @@ public HRegionLocation getRegionLocation(final TableName tableName, final byte[] return reload ? 
relocateRegion(tableName, row) : locateRegion(tableName, row); } - @Override public boolean isTableEnabled(TableName tableName) throws IOException { return getTableState(tableName).inStates(TableState.State.ENABLED); @@ -730,7 +716,7 @@ public boolean isTableAvailable(final TableName tableName, @Nullable final byte[ return true; } List> locations = - MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true); + MetaTableAccessor.getTableRegionsAndLocations(this, tableName, true); int notDeployed = 0; int regionCount = 0; @@ -738,21 +724,21 @@ public boolean isTableAvailable(final TableName tableName, @Nullable final byte[ RegionInfo info = pair.getFirst(); if (pair.getSecond() == null) { LOG.debug("Table {} has not deployed region {}", tableName, - pair.getFirst().getEncodedName()); + pair.getFirst().getEncodedName()); notDeployed++; - } else if (splitKeys != null - && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { - for (byte[] splitKey : splitKeys) { - // Just check if the splitkey is available - if (Bytes.equals(info.getStartKey(), splitKey)) { - regionCount++; - break; + } else + if (splitKeys != null && !Bytes.equals(info.getStartKey(), HConstants.EMPTY_BYTE_ARRAY)) { + for (byte[] splitKey : splitKeys) { + // Just check if the splitkey is available + if (Bytes.equals(info.getStartKey(), splitKey)) { + regionCount++; + break; + } } + } else { + // Always empty start row should be counted + regionCount++; } - } else { - // Always empty start row should be counted - regionCount++; - } } if (notDeployed > 0) { if (LOG.isDebugEnabled()) { @@ -762,7 +748,7 @@ public boolean isTableAvailable(final TableName tableName, @Nullable final byte[ } else if (splitKeys != null && regionCount != splitKeys.length + 1) { if (LOG.isDebugEnabled()) { LOG.debug("Table {} expected to have {} regions, but only {} available", tableName, - splitKeys.length + 1, regionCount); + splitKeys.length + 1, regionCount); } return false; } else { @@ -832,16 +818,16 @@ public HRegionLocation locateRegion(final TableName tableName, final byte[] row) public HRegionLocation relocateRegion(final TableName tableName, final byte[] row) throws IOException { RegionLocations locations = - relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); + relocateRegion(tableName, row, RegionReplicaUtil.DEFAULT_REPLICA_ID); return locations == null ? null - : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); + : locations.getRegionLocation(RegionReplicaUtil.DEFAULT_REPLICA_ID); } @Override - public RegionLocations relocateRegion(final TableName tableName, - final byte [] row, int replicaId) throws IOException{ + public RegionLocations relocateRegion(final TableName tableName, final byte[] row, int replicaId) + throws IOException { // Since this is an explicit request not to use any caching, finding - // disabled tables should not be desirable. This will ensure that an exception is thrown when + // disabled tables should not be desirable. This will ensure that an exception is thrown when // the first time a disabled table is interacted with. 
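The getRegionLocation / relocateRegion changes above concern how a row is mapped to a region and when the client-side location cache is bypassed. A minimal sketch of the corresponding public API, assuming a reachable cluster and using placeholder table and row names:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionLocationSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
      RegionLocator locator = connection.getRegionLocator(TableName.valueOf("table1"))) {
      // reload=false may be answered from the client-side meta cache.
      HRegionLocation cached = locator.getRegionLocation(Bytes.toBytes("row1"), false);
      // reload=true forces a fresh hbase:meta lookup, analogous to relocateRegion above.
      HRegionLocation fresh = locator.getRegionLocation(Bytes.toBytes("row1"), true);
      System.out.println(cached.getServerName() + " / " + fresh.getServerName());
    }
  }
}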
if (!tableName.equals(TableName.META_TABLE_NAME) && isTableDisabled(tableName)) { throw new TableNotEnabledException(tableName.getNameAsString() + " is disabled."); @@ -871,8 +857,8 @@ public RegionLocations locateRegion(final TableName tableName, final byte[] row, } } - private RegionLocations locateMeta(final TableName tableName, - boolean useCache, int replicaId) throws IOException { + private RegionLocations locateMeta(final TableName tableName, boolean useCache, int replicaId) + throws IOException { // HBASE-10785: We cache the location of the META itself, so that we are not overloading // zookeeper with one request for every region lookup. We cache the META with empty row // key in MetaCache. @@ -924,15 +910,15 @@ private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, bool // without knowing the precise region names. byte[] metaStartKey = RegionInfo.createRegionName(tableName, row, HConstants.NINES, false); byte[] metaStopKey = - RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); + RegionInfo.createRegionName(tableName, HConstants.EMPTY_START_ROW, "", false); Scan s = new Scan().withStartRow(metaStartKey).withStopRow(metaStopKey, true) - .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(1) - .setReadType(ReadType.PREAD); + .addFamily(HConstants.CATALOG_FAMILY).setReversed(true).setCaching(1) + .setReadType(ReadType.PREAD); switch (this.metaReplicaMode) { case LOAD_BALANCE: - int metaReplicaId = this.metaReplicaSelector.select(tableName, row, - RegionLocateType.CURRENT); + int metaReplicaId = + this.metaReplicaSelector.select(tableName, row, RegionLocateType.CURRENT); if (metaReplicaId != RegionInfo.DEFAULT_REPLICA_ID) { // If the selector gives a non-primary meta replica region, then go with it. // Otherwise, just go to primary in non-hedgedRead mode. @@ -948,7 +934,7 @@ private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, bool } int maxAttempts = (retry ? 
numTries : 1); boolean relocateMeta = false; - for (int tries = 0; ; tries++) { + for (int tries = 0;; tries++) { if (tries >= maxAttempts) { throw new NoServerForRegionException("Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName + " after " + tries + " tries."); @@ -981,9 +967,9 @@ private RegionLocations locateRegionInMeta(TableName tableName, byte[] row, bool RegionInfo.DEFAULT_REPLICA_ID); } s.resetMvccReadPoint(); - try (ReversedClientScanner rcs = - new ReversedClientScanner(conf, s, TableName.META_TABLE_NAME, this, rpcCallerFactory, - rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) { + try (ReversedClientScanner rcs = new ReversedClientScanner(conf, s, + TableName.META_TABLE_NAME, this, rpcCallerFactory, rpcControllerFactory, + getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSecond)) { boolean tableNotFound = true; for (;;) { Result regionInfoRow = rcs.next(); @@ -992,7 +978,7 @@ rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSeco throw new TableNotFoundException(tableName); } else { throw new IOException( - "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); + "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); } } tableNotFound = false; @@ -1003,8 +989,8 @@ rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSeco } RegionInfo regionInfo = locations.getRegionLocation(replicaId).getRegion(); if (regionInfo == null) { - throw new IOException("RegionInfo null or empty in " + TableName.META_TABLE_NAME + - ", row=" + regionInfoRow); + throw new IOException("RegionInfo null or empty in " + TableName.META_TABLE_NAME + + ", row=" + regionInfoRow); } // See HBASE-20182. It is possible that we locate to a split parent even after the // children are online, so here we need to skip this region and go to the next one. @@ -1012,26 +998,26 @@ rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSeco continue; } if (regionInfo.isOffline()) { - throw new RegionOfflineException("Region offline; disable table call? " + - regionInfo.getRegionNameAsString()); + throw new RegionOfflineException( + "Region offline; disable table call? " + regionInfo.getRegionNameAsString()); } // It is possible that the split children have not been online yet and we have skipped // the parent in the above condition, so we may have already reached a region which does // not contains us. 
if (!regionInfo.containsRow(row)) { throw new IOException( - "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); + "Unable to find region for " + Bytes.toStringBinary(row) + " in " + tableName); } ServerName serverName = locations.getRegionLocation(replicaId).getServerName(); if (serverName == null) { - throw new NoServerForRegionException("No server address listed in " + - TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + - " containing row " + Bytes.toStringBinary(row)); + throw new NoServerForRegionException("No server address listed in " + + TableName.META_TABLE_NAME + " for region " + regionInfo.getRegionNameAsString() + + " containing row " + Bytes.toStringBinary(row)); } if (isDeadServer(serverName)) { throw new RegionServerStoppedException( - "hbase:meta says the region " + regionInfo.getRegionNameAsString() + - " is managed by the server " + serverName + ", but it is dead."); + "hbase:meta says the region " + regionInfo.getRegionNameAsString() + + " is managed by the server " + serverName + ", but it is dead."); } // Instantiate the location cacheLocation(tableName, locations); @@ -1050,29 +1036,31 @@ rpcControllerFactory, getMetaLookupPool(), metaReplicaCallTimeoutScanInMicroSeco } catch (IOException e) { ExceptionUtil.rethrowIfInterrupt(e); if (e instanceof RemoteException) { - e = ((RemoteException)e).unwrapRemoteException(); + e = ((RemoteException) e).unwrapRemoteException(); } if (e instanceof CallQueueTooBigException) { // Give a special check on CallQueueTooBigException, see #HBASE-17114 pauseBase = this.pauseForCQTBE; } if (tries < maxAttempts - 1) { - LOG.debug("locateRegionInMeta parentTable='{}', attempt={} of {} failed; retrying " + - "after sleep of {}", TableName.META_TABLE_NAME, tries, maxAttempts, maxAttempts, e); + LOG.debug( + "locateRegionInMeta parentTable='{}', attempt={} of {} failed; retrying " + + "after sleep of {}", + TableName.META_TABLE_NAME, tries, maxAttempts, maxAttempts, e); } else { throw e; } // Only relocate the parent region if necessary relocateMeta = - !(e instanceof RegionOfflineException || e instanceof NoServerForRegionException); + !(e instanceof RegionOfflineException || e instanceof NoServerForRegionException); } finally { userRegionLock.unlock(); } - try{ + try { Thread.sleep(ConnectionUtils.getPauseTime(pauseBase, tries)); } catch (InterruptedException e) { - throw new InterruptedIOException("Giving up trying to location region in " + - "meta: thread is interrupted."); + throw new InterruptedIOException( + "Giving up trying to location region in " + "meta: thread is interrupted."); } } } @@ -1081,8 +1069,8 @@ void takeUserRegionLock() throws IOException { try { long waitTime = connectionConfig.getMetaOperationTimeout(); if (!userRegionLock.tryLock(waitTime, TimeUnit.MILLISECONDS)) { - throw new LockTimeoutException("Failed to get user region lock in" - + waitTime + " ms. " + " for accessing meta region server."); + throw new LockTimeoutException("Failed to get user region lock in" + waitTime + " ms. " + + " for accessing meta region server."); } } catch (InterruptedException ie) { LOG.error("Interrupted while waiting for a lock", ie); @@ -1101,12 +1089,11 @@ public void cacheLocation(final TableName tableName, final RegionLocations locat } /** - * Search the cache for a location that fits our table and row key. - * Return null if no suitable region is located. + * Search the cache for a location that fits our table and row key. 
Return null if no suitable + * region is located. * @return Null or region location found in cache. */ - RegionLocations getCachedLocation(final TableName tableName, - final byte [] row) { + RegionLocations getCachedLocation(final TableName tableName, final byte[] row) { return metaCache.getCachedLocation(tableName, row); } @@ -1180,7 +1167,7 @@ boolean isMasterRunning() throws IOException { } catch (Exception e) { throw ProtobufUtil.handleRemoteException(e); } - return response != null? response.getIsMasterRunning(): false; + return response != null ? response.getIsMasterRunning() : false; } } @@ -1189,7 +1176,8 @@ boolean isMasterRunning() throws IOException { */ static class ServerErrorTracker { // We need a concurrent map here, as we could have multiple threads updating it in parallel. - private final ConcurrentMap errorsByServer = new ConcurrentHashMap<>(); + private final ConcurrentMap errorsByServer = + new ConcurrentHashMap<>(); private final long canRetryUntil; private final int maxTries;// max number to try private final long startTrackingTime; @@ -1211,14 +1199,13 @@ public ServerErrorTracker(long timeout, int maxTries) { */ boolean canTryMore(int numAttempt) { // If there is a single try we must not take into account the time. - return numAttempt < maxTries || (maxTries > 1 && - EnvironmentEdgeManager.currentTime() < this.canRetryUntil); + return numAttempt < maxTries + || (maxTries > 1 && EnvironmentEdgeManager.currentTime() < this.canRetryUntil); } /** * Calculates the back-off time for a retrying request to a particular server. - * - * @param server The server in question. + * @param server The server in question. * @param basePause The default hci pause. * @return The time to wait before sending next request. */ @@ -1292,8 +1279,7 @@ private MasterProtos.MasterService.BlockingInterface makeStubNoRetries() throw new MasterNotRunningException(sn + " is dead."); } // Use the security info interface name as our stub key - String key = - getStubKey(MasterProtos.MasterService.getDescriptor().getName(), sn); + String key = getStubKey(MasterProtos.MasterService.getDescriptor().getName(), sn); MasterProtos.MasterService.BlockingInterface stub = (MasterProtos.MasterService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { BlockingRpcChannel channel = rpcClient.createBlockingRpcChannel(sn, user, rpcTimeout); @@ -1355,8 +1341,8 @@ public BlockingInterface getClient(ServerName serverName) throws IOException { if (isDeadServer(serverName)) { throw new RegionServerStoppedException(serverName + " is dead."); } - String key = getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), - serverName); + String key = + getStubKey(ClientProtos.ClientService.BlockingInterface.class.getName(), serverName); return (ClientProtos.ClientService.BlockingInterface) computeIfAbsentEx(stubs, key, () -> { BlockingRpcChannel channel = this.rpcClient.createBlockingRpcChannel(serverName, user, rpcTimeout); @@ -1389,44 +1375,38 @@ private MasterKeepAliveConnection getKeepAliveMasterService() throws IOException MasterServiceState mss = masterServiceState; @Override - public MasterProtos.AbortProcedureResponse abortProcedure( - RpcController controller, + public MasterProtos.AbortProcedureResponse abortProcedure(RpcController controller, MasterProtos.AbortProcedureRequest request) throws ServiceException { return stub.abortProcedure(controller, request); } @Override - public MasterProtos.GetProceduresResponse getProcedures( - RpcController controller, + public 
MasterProtos.GetProceduresResponse getProcedures(RpcController controller, MasterProtos.GetProceduresRequest request) throws ServiceException { return stub.getProcedures(controller, request); } @Override - public MasterProtos.GetLocksResponse getLocks( - RpcController controller, + public MasterProtos.GetLocksResponse getLocks(RpcController controller, MasterProtos.GetLocksRequest request) throws ServiceException { return stub.getLocks(controller, request); } @Override - public MasterProtos.AddColumnResponse addColumn( - RpcController controller, + public MasterProtos.AddColumnResponse addColumn(RpcController controller, MasterProtos.AddColumnRequest request) throws ServiceException { return stub.addColumn(controller, request); } @Override public MasterProtos.DeleteColumnResponse deleteColumn(RpcController controller, - MasterProtos.DeleteColumnRequest request) - throws ServiceException { + MasterProtos.DeleteColumnRequest request) throws ServiceException { return stub.deleteColumn(controller, request); } @Override public MasterProtos.ModifyColumnResponse modifyColumn(RpcController controller, - MasterProtos.ModifyColumnRequest request) - throws ServiceException { + MasterProtos.ModifyColumnRequest request) throws ServiceException { return stub.modifyColumn(controller, request); } @@ -1437,9 +1417,8 @@ public MasterProtos.MoveRegionResponse moveRegion(RpcController controller, } @Override - public MasterProtos.MergeTableRegionsResponse mergeTableRegions( - RpcController controller, MasterProtos.MergeTableRegionsRequest request) - throws ServiceException { + public MasterProtos.MergeTableRegionsResponse mergeTableRegions(RpcController controller, + MasterProtos.MergeTableRegionsRequest request) throws ServiceException { return stub.mergeTableRegions(controller, request); } @@ -1517,8 +1496,8 @@ public MasterProtos.StopMasterResponse stopMaster(RpcController controller, @Override public MasterProtos.IsInMaintenanceModeResponse isMasterInMaintenanceMode( - final RpcController controller, - final MasterProtos.IsInMaintenanceModeRequest request) throws ServiceException { + final RpcController controller, final MasterProtos.IsInMaintenanceModeRequest request) + throws ServiceException { return stub.isMasterInMaintenanceMode(controller, request); } @@ -1529,22 +1508,20 @@ public MasterProtos.BalanceResponse balance(RpcController controller, } @Override - public MasterProtos.SetBalancerRunningResponse setBalancerRunning( - RpcController controller, MasterProtos.SetBalancerRunningRequest request) - throws ServiceException { + public MasterProtos.SetBalancerRunningResponse setBalancerRunning(RpcController controller, + MasterProtos.SetBalancerRunningRequest request) throws ServiceException { return stub.setBalancerRunning(controller, request); } @Override - public NormalizeResponse normalize(RpcController controller, - NormalizeRequest request) throws ServiceException { + public NormalizeResponse normalize(RpcController controller, NormalizeRequest request) + throws ServiceException { return stub.normalize(controller, request); } @Override - public SetNormalizerRunningResponse setNormalizerRunning( - RpcController controller, SetNormalizerRunningRequest request) - throws ServiceException { + public SetNormalizerRunningResponse setNormalizerRunning(RpcController controller, + SetNormalizerRunningRequest request) throws ServiceException { return stub.setNormalizerRunning(controller, request); } @@ -1570,8 +1547,7 @@ public MasterProtos.IsCatalogJanitorEnabledResponse isCatalogJanitorEnabled( @Override 
public MasterProtos.RunCleanerChoreResponse runCleanerChore(RpcController controller, - MasterProtos.RunCleanerChoreRequest request) - throws ServiceException { + MasterProtos.RunCleanerChoreRequest request) throws ServiceException { return stub.runCleanerChore(controller, request); } @@ -1590,9 +1566,8 @@ public MasterProtos.IsCleanerChoreEnabledResponse isCleanerChoreEnabled( } @Override - public ClientProtos.CoprocessorServiceResponse execMasterService( - RpcController controller, ClientProtos.CoprocessorServiceRequest request) - throws ServiceException { + public ClientProtos.CoprocessorServiceResponse execMasterService(RpcController controller, + ClientProtos.CoprocessorServiceRequest request) throws ServiceException { return stub.execMasterService(controller, request); } @@ -1622,16 +1597,14 @@ public MasterProtos.IsSnapshotDoneResponse isSnapshotDone(RpcController controll } @Override - public MasterProtos.RestoreSnapshotResponse restoreSnapshot( - RpcController controller, MasterProtos.RestoreSnapshotRequest request) - throws ServiceException { + public MasterProtos.RestoreSnapshotResponse restoreSnapshot(RpcController controller, + MasterProtos.RestoreSnapshotRequest request) throws ServiceException { return stub.restoreSnapshot(controller, request); } @Override - public MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup( - RpcController controller, MasterProtos.SetSnapshotCleanupRequest request) - throws ServiceException { + public MasterProtos.SetSnapshotCleanupResponse switchSnapshotCleanup(RpcController controller, + MasterProtos.SetSnapshotCleanupRequest request) throws ServiceException { return stub.switchSnapshotCleanup(controller, request); } @@ -1643,16 +1616,14 @@ public MasterProtos.IsSnapshotCleanupEnabledResponse isSnapshotCleanupEnabled( } @Override - public MasterProtos.ExecProcedureResponse execProcedure( - RpcController controller, MasterProtos.ExecProcedureRequest request) - throws ServiceException { + public MasterProtos.ExecProcedureResponse execProcedure(RpcController controller, + MasterProtos.ExecProcedureRequest request) throws ServiceException { return stub.execProcedure(controller, request); } @Override - public MasterProtos.ExecProcedureResponse execProcedureWithRet( - RpcController controller, MasterProtos.ExecProcedureRequest request) - throws ServiceException { + public MasterProtos.ExecProcedureResponse execProcedureWithRet(RpcController controller, + MasterProtos.ExecProcedureRequest request) throws ServiceException { return stub.execProcedureWithRet(controller, request); } @@ -1669,72 +1640,66 @@ public MasterProtos.GetProcedureResultResponse getProcedureResult(RpcController } @Override - public MasterProtos.IsMasterRunningResponse isMasterRunning( - RpcController controller, MasterProtos.IsMasterRunningRequest request) - throws ServiceException { + public MasterProtos.IsMasterRunningResponse isMasterRunning(RpcController controller, + MasterProtos.IsMasterRunningRequest request) throws ServiceException { return stub.isMasterRunning(controller, request); } @Override public MasterProtos.ModifyNamespaceResponse modifyNamespace(RpcController controller, - MasterProtos.ModifyNamespaceRequest request) - throws ServiceException { + MasterProtos.ModifyNamespaceRequest request) throws ServiceException { return stub.modifyNamespace(controller, request); } @Override - public MasterProtos.CreateNamespaceResponse createNamespace( - RpcController controller, + public MasterProtos.CreateNamespaceResponse createNamespace(RpcController controller, 
MasterProtos.CreateNamespaceRequest request) throws ServiceException { return stub.createNamespace(controller, request); } @Override - public MasterProtos.DeleteNamespaceResponse deleteNamespace( - RpcController controller, + public MasterProtos.DeleteNamespaceResponse deleteNamespace(RpcController controller, MasterProtos.DeleteNamespaceRequest request) throws ServiceException { return stub.deleteNamespace(controller, request); } @Override - public MasterProtos.ListNamespacesResponse listNamespaces( - RpcController controller, + public MasterProtos.ListNamespacesResponse listNamespaces(RpcController controller, MasterProtos.ListNamespacesRequest request) throws ServiceException { return stub.listNamespaces(controller, request); } @Override public MasterProtos.GetNamespaceDescriptorResponse getNamespaceDescriptor( - RpcController controller, - MasterProtos.GetNamespaceDescriptorRequest request) throws ServiceException { + RpcController controller, MasterProtos.GetNamespaceDescriptorRequest request) + throws ServiceException { return stub.getNamespaceDescriptor(controller, request); } @Override public MasterProtos.ListNamespaceDescriptorsResponse listNamespaceDescriptors( - RpcController controller, - MasterProtos.ListNamespaceDescriptorsRequest request) throws ServiceException { + RpcController controller, MasterProtos.ListNamespaceDescriptorsRequest request) + throws ServiceException { return stub.listNamespaceDescriptors(controller, request); } @Override public MasterProtos.ListTableDescriptorsByNamespaceResponse listTableDescriptorsByNamespace( RpcController controller, MasterProtos.ListTableDescriptorsByNamespaceRequest request) - throws ServiceException { + throws ServiceException { return stub.listTableDescriptorsByNamespace(controller, request); } @Override public MasterProtos.ListTableNamesByNamespaceResponse listTableNamesByNamespace( RpcController controller, MasterProtos.ListTableNamesByNamespaceRequest request) - throws ServiceException { + throws ServiceException { return stub.listTableNamesByNamespace(controller, request); } @Override - public MasterProtos.GetTableStateResponse getTableState( - RpcController controller, MasterProtos.GetTableStateRequest request) - throws ServiceException { + public MasterProtos.GetTableStateResponse getTableState(RpcController controller, + MasterProtos.GetTableStateRequest request) throws ServiceException { return stub.getTableState(controller, request); } @@ -1751,30 +1716,26 @@ public MasterProtos.GetSchemaAlterStatusResponse getSchemaAlterStatus( } @Override - public MasterProtos.GetTableDescriptorsResponse getTableDescriptors( - RpcController controller, MasterProtos.GetTableDescriptorsRequest request) - throws ServiceException { + public MasterProtos.GetTableDescriptorsResponse getTableDescriptors(RpcController controller, + MasterProtos.GetTableDescriptorsRequest request) throws ServiceException { return stub.getTableDescriptors(controller, request); } @Override - public MasterProtos.GetTableNamesResponse getTableNames( - RpcController controller, MasterProtos.GetTableNamesRequest request) - throws ServiceException { + public MasterProtos.GetTableNamesResponse getTableNames(RpcController controller, + MasterProtos.GetTableNamesRequest request) throws ServiceException { return stub.getTableNames(controller, request); } @Override - public MasterProtos.GetClusterStatusResponse getClusterStatus( - RpcController controller, MasterProtos.GetClusterStatusRequest request) - throws ServiceException { + public 
MasterProtos.GetClusterStatusResponse getClusterStatus(RpcController controller, + MasterProtos.GetClusterStatusRequest request) throws ServiceException { return stub.getClusterStatus(controller, request); } @Override - public MasterProtos.SetQuotaResponse setQuota( - RpcController controller, MasterProtos.SetQuotaRequest request) - throws ServiceException { + public MasterProtos.SetQuotaResponse setQuota(RpcController controller, + MasterProtos.SetQuotaRequest request) throws ServiceException { return stub.setQuota(controller, request); } @@ -1800,15 +1761,15 @@ public IsBalancerEnabledResponse isBalancerEnabled(RpcController controller, @Override public MasterProtos.SetSplitOrMergeEnabledResponse setSplitOrMergeEnabled( - RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest request) - throws ServiceException { + RpcController controller, MasterProtos.SetSplitOrMergeEnabledRequest request) + throws ServiceException { return stub.setSplitOrMergeEnabled(controller, request); } @Override public MasterProtos.IsSplitOrMergeEnabledResponse isSplitOrMergeEnabled( - RpcController controller, MasterProtos.IsSplitOrMergeEnabledRequest request) - throws ServiceException { + RpcController controller, MasterProtos.IsSplitOrMergeEnabledRequest request) + throws ServiceException { return stub.isSplitOrMergeEnabled(controller, request); } @@ -1849,8 +1810,9 @@ public DisableReplicationPeerResponse disableReplicationPeer(RpcController contr } @Override - public ListDecommissionedRegionServersResponse listDecommissionedRegionServers(RpcController controller, - ListDecommissionedRegionServersRequest request) throws ServiceException { + public ListDecommissionedRegionServersResponse listDecommissionedRegionServers( + RpcController controller, ListDecommissionedRegionServersRequest request) + throws ServiceException { return stub.listDecommissionedRegionServers(controller, request); } @@ -1861,9 +1823,8 @@ public DecommissionRegionServersResponse decommissionRegionServers(RpcController } @Override - public RecommissionRegionServerResponse recommissionRegionServer( - RpcController controller, RecommissionRegionServerRequest request) - throws ServiceException { + public RecommissionRegionServerResponse recommissionRegionServer(RpcController controller, + RecommissionRegionServerRequest request) throws ServiceException { return stub.recommissionRegionServer(controller, request); } @@ -1887,15 +1848,14 @@ public ListReplicationPeersResponse listReplicationPeers(RpcController controlle } @Override - public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes( - RpcController controller, GetSpaceQuotaRegionSizesRequest request) - throws ServiceException { + public GetSpaceQuotaRegionSizesResponse getSpaceQuotaRegionSizes(RpcController controller, + GetSpaceQuotaRegionSizesRequest request) throws ServiceException { return stub.getSpaceQuotaRegionSizes(controller, request); } @Override - public GetQuotaStatesResponse getQuotaStates( - RpcController controller, GetQuotaStatesRequest request) throws ServiceException { + public GetQuotaStatesResponse getQuotaStates(RpcController controller, + GetQuotaStatesRequest request) throws ServiceException { return stub.getQuotaStates(controller, request); } @@ -1955,15 +1915,15 @@ public HBaseProtos.LogEntry getLogEntries(RpcController controller, @Override public ModifyTableStoreFileTrackerResponse modifyTableStoreFileTracker( - RpcController controller, ModifyTableStoreFileTrackerRequest request) - throws ServiceException { + RpcController controller, 
ModifyTableStoreFileTrackerRequest request) + throws ServiceException { return stub.modifyTableStoreFileTracker(controller, request); } @Override public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker( - RpcController controller, ModifyColumnStoreFileTrackerRequest request) - throws ServiceException { + RpcController controller, ModifyColumnStoreFileTrackerRequest request) + throws ServiceException { return stub.modifyColumnStoreFileTracker(controller, request); } }; @@ -1971,19 +1931,19 @@ public ModifyColumnStoreFileTrackerResponse modifyColumnStoreFileTracker( private static void release(MasterServiceState mss) { if (mss != null && mss.connection != null) { - ((ConnectionImplementation)mss.connection).releaseMaster(mss); + ((ConnectionImplementation) mss.connection).releaseMaster(mss); } } private boolean isKeepAliveMasterConnectedAndRunning(MasterServiceState mss) { - if (mss.getStub() == null){ + if (mss.getStub() == null) { return false; } try { return mss.isMasterRunning(); } catch (UndeclaredThrowableException e) { // It's somehow messy, but we can receive exceptions such as - // java.net.ConnectException but they're not declared. So we catch it... + // java.net.ConnectException but they're not declared. So we catch it... LOG.info("Master connection is not running anymore", e.getUndeclaredThrowable()); return false; } catch (IOException se) { @@ -2030,19 +1990,19 @@ public void deleteCachedRegionLocation(final HRegionLocation location) { } /** - * Update the location with the new value (if the exception is a RegionMovedException) - * or delete it from the cache. Does nothing if we can be sure from the exception that - * the location is still accurate, or if the cache has already been updated. - * @param exception an object (to simplify user code) on which we will try to find a nested - * or wrapped or both RegionMovedException + * Update the location with the new value (if the exception is a RegionMovedException) or delete + * it from the cache. Does nothing if we can be sure from the exception that the location is still + * accurate, or if the cache has already been updated. + * @param exception an object (to simplify user code) on which we will try to find a nested or + * wrapped or both RegionMovedException * @param source server that is the source of the location update. */ @Override public void updateCachedLocations(final TableName tableName, byte[] regionName, byte[] rowkey, - final Object exception, final ServerName source) { + final Object exception, final ServerName source) { if (rowkey == null || tableName == null) { - LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) + - ", tableName=" + (tableName == null ? "null" : tableName)); + LOG.warn("Coding error, see method javadoc. row=" + (rowkey == null ? "null" : rowkey) + + ", tableName=" + (tableName == null ? "null" : tableName)); return; } @@ -2068,7 +2028,7 @@ public void updateCachedLocations(final TableName tableName, byte[] regionName, } if (oldLocation == null || !source.equals(oldLocation.getServerName())) { // There is no such location in the cache (it's been removed already) or - // the cache has already been refreshed with a different location. => nothing to do + // the cache has already been refreshed with a different location. 
=> nothing to do return; } @@ -2083,14 +2043,12 @@ public void updateCachedLocations(final TableName tableName, byte[] regionName, if (cause instanceof RegionMovedException) { RegionMovedException rme = (RegionMovedException) cause; if (LOG.isTraceEnabled()) { - LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " + - rme.getHostname() + ":" + rme.getPort() + - " according to " + source.getAddress()); + LOG.trace("Region " + regionInfo.getRegionNameAsString() + " moved to " + + rme.getHostname() + ":" + rme.getPort() + " according to " + source.getAddress()); } // We know that the region is not anymore on this region server, but we know - // the new location. - updateCachedLocation( - regionInfo, source, rme.getServerName(), rme.getLocationSeqNum()); + // the new location. + updateCachedLocation(regionInfo, source, rme.getServerName(), rme.getLocationSeqNum()); return; } } @@ -2127,8 +2085,7 @@ public ClientBackoffPolicy getBackoffPolicy() { } /* - * Return the number of cached region for a table. It will only be called - * from a unit test. + * Return the number of cached region for a table. It will only be called from a unit test. */ int getNumberOfCachedRegionLocations(final TableName tableName) { return metaCache.getNumberOfCachedRegionLocations(tableName); @@ -2152,7 +2109,7 @@ public boolean isClosed() { } @Override - public boolean isAborted(){ + public boolean isAborted() { return this.aborted; } @@ -2187,12 +2144,11 @@ public void close() { } /** - * Close the connection for good. On the off chance that someone is unable to close - * the connection, perhaps because it bailed out prematurely, the method - * below will ensure that this instance is cleaned up. - * Caveat: The JVM may take an unknown amount of time to call finalize on an - * unreachable object, so our hope is that every consumer cleans up after - * itself, like any good citizen. + * Close the connection for good. On the off chance that someone is unable to close the + * connection, perhaps because it bailed out prematurely, the method below will ensure that this + * instance is cleaned up. Caveat: The JVM may take an unknown amount of time to call finalize on + * an unreachable object, so our hope is that every consumer cleans up after itself, like any good + * citizen. 
*/ @Override protected void finalize() throws Throwable { @@ -2217,8 +2173,8 @@ public TableState getTableState(TableName tableName) throws IOException { @Override public RpcRetryingCallerFactory getNewRpcRetryingCallerFactory(Configuration conf) { - return RpcRetryingCallerFactory - .instantiate(conf, this.interceptor, this.getStatisticsTracker()); + return RpcRetryingCallerFactory.instantiate(conf, this.interceptor, + this.getStatisticsTracker()); } @Override @@ -2256,7 +2212,7 @@ private static T get(CompletableFuture future) throws IOException { @Override public List getLiveRegionServers(Supplier masterAddrTracker, int count) - throws IOException { + throws IOException { RegionServerStatusService.BlockingInterface stub = RegionServerStatusService.newBlockingStub( rpcClient.createBlockingRpcChannel(masterAddrTracker.get(), user, rpcTimeout)); GetLiveRegionServersResponse resp; @@ -2269,13 +2225,13 @@ public List getLiveRegionServers(Supplier masterAddrTrac throw new IOException(t); } return resp.getServerList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()); + .collect(Collectors.toList()); } @Override public List getAllBootstrapNodes(ServerName regionServer) throws IOException { BootstrapNodeService.BlockingInterface stub = BootstrapNodeService - .newBlockingStub(rpcClient.createBlockingRpcChannel(regionServer, user, rpcTimeout)); + .newBlockingStub(rpcClient.createBlockingRpcChannel(regionServer, user, rpcTimeout)); GetAllBootstrapNodesResponse resp; try { resp = stub.getAllBootstrapNodes(null, GetAllBootstrapNodesRequest.getDefaultInstance()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java index 975d8df71808..2ace3959ffa6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistry.java @@ -25,8 +25,8 @@ /** * Registry for meta information needed for connection setup to a HBase cluster. Implementations - * hold cluster information such as this cluster's id, location of hbase:meta, etc.. - * Internal use only. + * hold cluster information such as this cluster's id, location of hbase:meta, etc.. Internal use + * only. 
*/ @InterfaceAudience.Private public interface ConnectionRegistry extends Closeable { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java index e9af7e7b2e0d..9133aaaee5f3 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionRegistryFactory.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; @@ -36,8 +37,8 @@ private ConnectionRegistryFactory() { */ static ConnectionRegistry getRegistry(Configuration conf) { Class clazz = - conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class, - ConnectionRegistry.class); + conf.getClass(CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, ZKConnectionRegistry.class, + ConnectionRegistry.class); return ReflectionUtils.newInstance(clazz, conf); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java index 8c305b6d2ef8..7fa53f653ab7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionUtils.java @@ -141,9 +141,9 @@ public static class ShortCircuitingClusterConnection extends ConnectionImplement private final AdminService.BlockingInterface localHostAdmin; private final ClientService.BlockingInterface localHostClient; - private ShortCircuitingClusterConnection(Configuration conf, User user, - ServerName serverName, AdminService.BlockingInterface admin, - ClientService.BlockingInterface client, ConnectionRegistry registry) throws IOException { + private ShortCircuitingClusterConnection(Configuration conf, User user, ServerName serverName, + AdminService.BlockingInterface admin, ClientService.BlockingInterface client, + ConnectionRegistry registry) throws IOException { super(conf, null, user, registry); this.serverName = serverName; this.localHostAdmin = admin; @@ -164,7 +164,7 @@ public ClientService.BlockingInterface getClient(ServerName sn) throws IOExcepti public MasterKeepAliveConnection getMaster() throws IOException { if (this.localHostClient instanceof MasterService.BlockingInterface) { return new ShortCircuitMasterConnection( - (MasterService.BlockingInterface) this.localHostClient); + (MasterService.BlockingInterface) this.localHostClient); } return super.getMaster(); } @@ -183,8 +183,9 @@ public MasterKeepAliveConnection getMaster() throws IOException { * @throws IOException if IO failure occurred */ public static ClusterConnection createShortCircuitConnection(final Configuration conf, User user, - final ServerName serverName, final AdminService.BlockingInterface admin, - final ClientService.BlockingInterface client, ConnectionRegistry registry) throws IOException { + final ServerName serverName, final AdminService.BlockingInterface admin, + final ClientService.BlockingInterface client, ConnectionRegistry registry) + throws IOException { if (user == null) { user = UserProvider.instantiate(conf).getCurrent(); } @@ -397,7 +398,7 @@ static boolean noMoreResultsForReverseScan(Scan scan, RegionInfo 
info) { static CompletableFuture> allOf(List> futures) { return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])) - .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList())); + .thenApply(v -> futures.stream().map(f -> f.getNow(null)).collect(toList())); } public static ScanResultCache createScanResultCache(Scan scan) { @@ -545,8 +546,8 @@ static CompletableFuture timelineConsistentRead(AsyncRegionLocator locato (locs, error) -> { if (error != null) { LOG.warn( - "Failed to locate all the replicas for table={}, row='{}', locateType={}" + - " give up timeline consistent read", + "Failed to locate all the replicas for table={}, row='{}', locateType={}" + + " give up timeline consistent read", tableName, Bytes.toStringBinary(row), locateType, error); return; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java index 533bd0f41b6d..45dec17a6958 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Consistency.java @@ -1,13 +1,13 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file + * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file + * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -27,22 +26,18 @@ public enum Consistency { // developer note: Do not reorder. Client.proto#Consistency depends on this order /** - * Strong consistency is the default consistency model in HBase, - * where reads and writes go through a single server which serializes - * the updates, and returns all data that was written and ack'd. + * Strong consistency is the default consistency model in HBase, where reads and writes go through + * a single server which serializes the updates, and returns all data that was written and ack'd. */ STRONG, /** - * Timeline consistent reads might return values that may not see - * the most recent updates. Write transactions are always performed - * in strong consistency model in HBase which guarantees that transactions - * are ordered, and replayed in the same order by all copies of the data. - * In timeline consistency, the get and scan requests can be answered from data - * that may be stale. - *
    - * The client may still observe transactions out of order if the requests are - * responded from different servers. + * Timeline consistent reads might return values that may not see the most recent updates. Write + * transactions are always performed in strong consistency model in HBase which guarantees that + * transactions are ordered, and replayed in the same order by all copies of the data. In timeline + * consistency, the get and scan requests can be answered from data that may be stale.
    + * The client may still observe transactions out of order if the requests are responded from + * different servers. */ TIMELINE, } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java index 72d588bc9763..3331c8107009 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,9 +22,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * CoprocessorDescriptor contains the details about how to build a coprocessor. - * This class is a pojo so there are no checks for the details carried by this class. - * Use {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor + * CoprocessorDescriptor contains the details about how to build a coprocessor. This class is a pojo + * so there are no checks for the details carried by this class. Use + * {@link CoprocessorDescriptorBuilder} to instantiate a CoprocessorDescriptor */ @InterfaceAudience.Public public interface CoprocessorDescriptor { @@ -45,7 +44,7 @@ public interface CoprocessorDescriptor { int getPriority(); /** - * @return Arbitrary key-value parameter pairs passed into the coprocessor. + * @return Arbitrary key-value parameter pairs passed into the coprocessor. */ Map getProperties(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java index 71d1264c0741..1bc64d01fbe1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/CoprocessorDescriptorBuilder.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -80,7 +79,7 @@ private static final class CoprocessorDescriptorImpl implements CoprocessorDescr private final Map properties; private CoprocessorDescriptorImpl(String className, String jarPath, int priority, - Map properties) { + Map properties) { this.className = className; this.jarPath = jarPath; this.priority = priority; @@ -109,10 +108,8 @@ public Map getProperties() { @Override public String toString() { - return "class:" + className - + ", jarPath:" + jarPath - + ", priority:" + priority - + ", properties:" + properties; + return "class:" + className + ", jarPath:" + jarPath + ", priority:" + priority + + ", properties:" + properties; } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java index 837e72d109c2..73e128dfd8f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Cursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
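The reflowed Consistency Javadoc above contrasts STRONG with TIMELINE reads. A short usage sketch, assuming an already-open Connection, a hypothetical table "t1" with region replicas enabled, and the public Get#setConsistency / Result#isStale APIs:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimelineReadSketch {
  static void timelineGet(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("t1"))) {
      Get get = new Get(Bytes.toBytes("row1"));
      get.setConsistency(Consistency.TIMELINE); // allow the read to be served by a secondary replica
      Result result = table.get(get);
      if (result.isStale()) {
        // Served by a secondary replica; it may not reflect the latest writes on the primary.
      }
    }
  }
}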
See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Scan cursor to tell client where server is scanning - * {@link Scan#setNeedCursorResult(boolean)} - * {@link Result#isCursor()} - * {@link Result#getCursor()} + * Scan cursor to tell client where server is scanning {@link Scan#setNeedCursorResult(boolean)} + * {@link Result#isCursor()} {@link Result#getCursor()} */ @InterfaceAudience.Public public class Cursor { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java index 8ab5d850d2d9..218e90c1440d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DelayingRunner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.client; +import java.util.List; +import java.util.Map; +import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; - -import java.util.List; -import java.util.Map; /** * A wrapper for a runnable for a group of actions for a single regionserver. @@ -32,10 +31,10 @@ *
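For the scan-cursor Javadoc condensed in the Cursor.java hunk above, a usage sketch may help; the table handle and printed output are placeholders, while Scan#setNeedCursorResult, Result#isCursor and Result#getCursor are the APIs that Javadoc links:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CursorScanSketch {
  // Assumes an already-open Table; cursor results report where the server is currently scanning.
  static void scanWithCursors(Table table) throws IOException {
    Scan scan = new Scan().setNeedCursorResult(true);
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result result : scanner) {
        if (result.isCursor()) {
          // No cells here, just the server's current scan position.
          System.out.println("scanning near " + Bytes.toStringBinary(result.getCursor().getRow()));
        } else {
          System.out.println("row " + Bytes.toStringBinary(result.getRow()));
        }
      }
    }
  }
}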

    *

    * This class exists to simulate using a ScheduledExecutorService with just a regular - * ExecutorService and Runnables. It is used for legacy reasons in the the client; this could - * only be removed if we change the expectations in HTable around the pool the client is able to - * pass in and even if we deprecate the current APIs would require keeping this class around - * for the interim to bridge between the legacy ExecutorServices and the scheduled pool. + * ExecutorService and Runnables. It is used for legacy reasons in the client; this could only + * be removed if we change the expectations in HTable around the pool the client is able to pass in + * and even if we deprecate the current APIs would require keeping this class around for the interim + * to bridge between the legacy ExecutorServices and the scheduled pool. *
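The paragraph above describes simulating a ScheduledExecutorService with a plain ExecutorService plus Runnables that sleep before doing their work. A generic illustration of that pattern, not the HBase class itself (the names and the delay value are made up):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class DelayedRunnableSketch {
  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    long delayMs = 200; // hypothetical delay
    Runnable actions = () -> System.out.println("running delayed action batch");
    pool.execute(() -> {
      try {
        TimeUnit.MILLISECONDS.sleep(delayMs); // "schedule" by sleeping inside the plain Runnable
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // remember the interrupt but keep going
      }
      actions.run(); // still run, so outstanding-task accounting stays correct
    });
    pool.shutdown();
  }
}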

    */ @InterfaceAudience.Private @@ -60,10 +59,9 @@ public void setRunner(Runnable runner) { @Override public void run() { if (!sleep()) { - LOG.warn( - "Interrupted while sleeping for expected sleep time " + sleepTime + " ms"); + LOG.warn("Interrupted while sleeping for expected sleep time " + sleepTime + " ms"); } - //TODO maybe we should consider switching to a listenableFuture for the actual callable and + // TODO maybe we should consider switching to a listenableFuture for the actual callable and // then handling the results/errors as callbacks. That way we can decrement outstanding tasks // even if we get interrupted here, but for now, we still need to run so we decrement the // outstanding tasks @@ -73,12 +71,11 @@ public void run() { /** * Sleep for an expected amount of time. *

    - * This is nearly a copy of what the Sleeper does, but with the ability to know if you - * got interrupted while sleeping. + * This is nearly a copy of what the Sleeper does, but with the ability to know if you got + * interrupted while sleeping. *

    - * - * @return true if the sleep completely entirely successfully, - * but otherwise false if the sleep was interrupted. + * @return true if the sleep completely entirely successfully, but otherwise + * false if the sleep was interrupted. */ private boolean sleep() { long now = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java index c2f8ee5f1191..16706015b44a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Delete.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -35,76 +33,68 @@ /** * Used to perform Delete operations on a single row. *

    - * To delete an entire row, instantiate a Delete object with the row - * to delete. To further define the scope of what to delete, perform - * additional methods as outlined below. + * To delete an entire row, instantiate a Delete object with the row to delete. To further define + * the scope of what to delete, perform additional methods as outlined below. + *

    + * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} for each family to + * delete. *

    - * To delete specific families, execute {@link #addFamily(byte[]) deleteFamily} - * for each family to delete. + * To delete multiple versions of specific columns, execute {@link #addColumns(byte[], byte[]) + * deleteColumns} for each column to delete. *

    - * To delete multiple versions of specific columns, execute - * {@link #addColumns(byte[], byte[]) deleteColumns} - * for each column to delete. + * To delete specific versions of specific columns, execute {@link #addColumn(byte[], byte[], long) + * deleteColumn} for each column version to delete. *

    - * To delete specific versions of specific columns, execute - * {@link #addColumn(byte[], byte[], long) deleteColumn} - * for each column version to delete. + * Specifying timestamps, deleteFamily and deleteColumns will delete all versions with a timestamp + * less than or equal to that passed. If no timestamp is specified, an entry is added with a + * timestamp of 'now' where 'now' is the server's EnvironmentEdgeManager.currentTime(). Specifying + * a timestamp to the deleteColumn method will delete versions only with a timestamp equal to that + * specified. If no timestamp is passed to deleteColumn, internally, it figures the most recent + * cell's timestamp and adds a delete at that timestamp; i.e. it deletes the most recently added + * cell. *

    - * Specifying timestamps, deleteFamily and deleteColumns will delete all - * versions with a timestamp less than or equal to that passed. If no - * timestamp is specified, an entry is added with a timestamp of 'now' - * where 'now' is the servers's EnvironmentEdgeManager.currentTime(). - * Specifying a timestamp to the deleteColumn method will - * delete versions only with a timestamp equal to that specified. - * If no timestamp is passed to deleteColumn, internally, it figures the - * most recent cell's timestamp and adds a delete at that timestamp; i.e. - * it deletes the most recently added cell. - *

    The timestamp passed to the constructor is used ONLY for delete of - * rows. For anything less -- a deleteColumn, deleteColumns or - * deleteFamily -- then you need to use the method overrides that take a - * timestamp. The constructor timestamp is not referenced. + * The timestamp passed to the constructor is used ONLY for delete of rows. For anything less -- a + * deleteColumn, deleteColumns or deleteFamily -- then you need to use the method overrides that + * take a timestamp. The constructor timestamp is not referenced. */ @InterfaceAudience.Public public class Delete extends Mutation { /** * Create a Delete operation for the specified row. *
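The Delete Javadoc reflowed above walks through addFamily, addColumns and addColumn. A compact usage sketch; the table handle, families and qualifiers are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class DeleteUsageSketch {
  static void deleteExamples(Table table) throws IOException {
    Delete d = new Delete(Bytes.toBytes("row1"));
    d.addFamily(Bytes.toBytes("cf1"));                                    // everything in cf1 for this row
    d.addColumns(Bytes.toBytes("cf2"), Bytes.toBytes("q1"), 1234567890L); // all versions of cf2:q1 with ts <= 1234567890
    d.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));               // latest version only; the server looks up its timestamp first
    table.delete(d);
  }
}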

    - * If no further operations are done, this will delete everything - * associated with the specified row (all versions of all columns in all - * families), with timestamp from current point in time to the past. - * Cells defining timestamp for a future point in time - * (timestamp > current time) will not be deleted. + * If no further operations are done, this will delete everything associated with the specified + * row (all versions of all columns in all families), with timestamp from current point in time to + * the past. Cells defining timestamp for a future point in time (timestamp > current time) will + * not be deleted. * @param row row key */ - public Delete(byte [] row) { + public Delete(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. * @param row row key * @param timestamp maximum version timestamp (only for delete row) */ - public Delete(byte [] row, long timestamp) { + public Delete(byte[] row, long timestamp) { this(row, 0, row.length, timestamp); } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. * @param row We make a local copy of this passed in row. * @param rowOffset * @param rowLength @@ -114,14 +104,13 @@ public Delete(final byte[] row, final int rowOffset, final int rowLength) { } /** - * Create a Delete operation for the specified row and timestamp.

    - * - * If no further operations are done, this will delete all columns in all - * families of the specified row with a timestamp less than or equal to the - * specified timestamp.

    - * - * This timestamp is ONLY used for a delete row operation. If specifying - * families or columns, you must specify each timestamp individually. + * Create a Delete operation for the specified row and timestamp. + *

    + * If no further operations are done, this will delete all columns in all families of the + * specified row with a timestamp less than or equal to the specified timestamp. + *

    + * This timestamp is ONLY used for a delete row operation. If specifying families or columns, you + * must specify each timestamp individually. * @param row We make a local copy of this passed in row. * @param rowOffset * @param rowLength @@ -141,15 +130,14 @@ public Delete(final Delete deleteToCopy) { } /** - * Construct the Delete with user defined data. NOTED: - * 1) all cells in the familyMap must have the delete type. - * see {@link org.apache.hadoop.hbase.Cell.Type} - * 2) the row of each cell must be same with passed row. + * Construct the Delete with user defined data. NOTED: 1) all cells in the familyMap must have the + * delete type. see {@link org.apache.hadoop.hbase.Cell.Type} 2) the row of each cell must be same + * with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Delete(byte[] row, long ts, NavigableMap> familyMap) { + public Delete(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -181,32 +169,30 @@ public Delete add(Cell cell) throws IOException { /** * Delete all versions of all columns of the specified family. *

    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. * @param family family name * @return this for invocation chaining */ - public Delete addFamily(final byte [] family) { + public Delete addFamily(final byte[] family) { this.addFamily(family, this.ts); return this; } /** - * Delete all columns of the specified family with a timestamp less than - * or equal to the specified timestamp. + * Delete all columns of the specified family with a timestamp less than or equal to the specified + * timestamp. *

    - * Overrides previous calls to deleteColumn and deleteColumns for the - * specified family. + * Overrides previous calls to deleteColumn and deleteColumns for the specified family. * @param family family name * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addFamily(final byte [] family, final long timestamp) { + public Delete addFamily(final byte[] family, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - if(!list.isEmpty()) { + if (!list.isEmpty()) { list.clear(); } KeyValue kv = new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamily); @@ -215,16 +201,14 @@ public Delete addFamily(final byte [] family, final long timestamp) { } /** - * Delete all columns of the specified family with a timestamp equal to - * the specified timestamp. + * Delete all columns of the specified family with a timestamp equal to the specified timestamp. * @param family family name * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addFamilyVersion(final byte [] family, final long timestamp) { + public Delete addFamilyVersion(final byte[] family, final long timestamp) { List list = getCellList(family); - list.add(new KeyValue(row, family, null, timestamp, - KeyValue.Type.DeleteFamilyVersion)); + list.add(new KeyValue(row, family, null, timestamp, KeyValue.Type.DeleteFamilyVersion)); return this; } @@ -234,39 +218,37 @@ public Delete addFamilyVersion(final byte [] family, final long timestamp) { * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier) { + public Delete addColumns(final byte[] family, final byte[] qualifier) { addColumns(family, qualifier, this.ts); return this; } /** - * Delete all versions of the specified column with a timestamp less than - * or equal to the specified timestamp. + * Delete all versions of the specified column with a timestamp less than or equal to the + * specified timestamp. * @param family family name * @param qualifier column qualifier * @param timestamp maximum version timestamp * @return this for invocation chaining */ - public Delete addColumns(final byte [] family, final byte [] qualifier, final long timestamp) { + public Delete addColumns(final byte[] family, final byte[] qualifier, final long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } List list = getCellList(family); - list.add(new KeyValue(this.row, family, qualifier, timestamp, - KeyValue.Type.DeleteColumn)); + list.add(new KeyValue(this.row, family, qualifier, timestamp, KeyValue.Type.DeleteColumn)); return this; } /** - * Delete the latest version of the specified column. - * This is an expensive call in that on the server-side, it first does a - * get to find the latest versions timestamp. Then it adds a delete using - * the fetched cells timestamp. + * Delete the latest version of the specified column. This is an expensive call in that on the + * server-side, it first does a get to find the latest versions timestamp. Then it adds a delete + * using the fetched cells timestamp. 
* @param family family name * @param qualifier column qualifier * @return this for invocation chaining */ - public Delete addColumn(final byte [] family, final byte [] qualifier) { + public Delete addColumn(final byte[] family, final byte[] qualifier) { this.addColumn(family, qualifier, this.ts); return this; } @@ -278,7 +260,7 @@ public Delete addColumn(final byte [] family, final byte [] qualifier) { * @param timestamp version timestamp * @return this for invocation chaining */ - public Delete addColumn(byte [] family, byte [] qualifier, long timestamp) { + public Delete addColumn(byte[] family, byte[] qualifier, long timestamp) { if (timestamp < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. ts=" + timestamp); } @@ -311,8 +293,8 @@ public Delete setDurability(Durability d) { /** * Method for setting the Delete's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Delete#Delete(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Delete#Delete(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java index 9419137842f7..4bc7a76514a2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/DoNotRetryRegionException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.DoNotRetryIOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java index aaf0b5cc7320..7ee451b982cc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Durability.java @@ -15,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; /** - * Enum describing the durability guarantees for tables and {@link Mutation}s - * Note that the items must be sorted in order of increasing durability + * Enum describing the durability guarantees for tables and {@link Mutation}s Note that the items + * must be sorted in order of increasing durability */ @InterfaceAudience.Public public enum Durability { /* Developer note: Do not rename the enum field names. They are serialized in HTableDescriptor */ /** - * If this is for tables durability, use HBase's global default value (SYNC_WAL). - * Otherwise, if this is for mutation, use the table's default setting to determine durability. - * This must remain the first option. + * If this is for tables durability, use HBase's global default value (SYNC_WAL). Otherwise, if + * this is for mutation, use the table's default setting to determine durability. This must remain + * the first option. 
*/ USE_DEFAULT, /** @@ -42,15 +41,15 @@ public enum Durability { */ ASYNC_WAL, /** - * Write the Mutation to the WAL synchronously. - * The data is flushed to the filesystem implementation, but not necessarily to disk. - * For HDFS this will flush the data to the designated number of DataNodes. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously. The data is flushed to the filesystem + * implementation, but not necessarily to disk. For HDFS this will flush the data to the + * designated number of DataNodes. See + * HADOOP-6313 */ SYNC_WAL, /** - * Write the Mutation to the WAL synchronously and force the entries to disk. - * See HADOOP-6313 + * Write the Mutation to the WAL synchronously and force the entries to disk. See + * HADOOP-6313 */ FSYNC_WAL } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java index 163203b48ab3..13e032adb3f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FailureInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,15 +19,12 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; - import org.apache.yetus.audience.InterfaceAudience; /** * Keeps track of repeated failures to any region server. Multiple threads manipulate the contents - * of this thread. - * - * Access to the members is guarded by the concurrent nature of the members inherently. - * + * of this thread. Access to the members is guarded by the concurrent nature of the members + * inherently. */ @InterfaceAudience.Private class FailureInfo { @@ -46,12 +43,10 @@ class FailureInfo { @Override public String toString() { - return "FailureInfo: numConsecutiveFailures = " - + numConsecutiveFailures + " timeOfFirstFailureMilliSec = " - + timeOfFirstFailureMilliSec + " timeOfLatestAttemptMilliSec = " - + timeOfLatestAttemptMilliSec - + " exclusivelyRetringInspiteOfFastFail = " - + exclusivelyRetringInspiteOfFastFail.get(); + return "FailureInfo: numConsecutiveFailures = " + numConsecutiveFailures + + " timeOfFirstFailureMilliSec = " + timeOfFirstFailureMilliSec + + " timeOfLatestAttemptMilliSec = " + timeOfLatestAttemptMilliSec + + " exclusivelyRetringInspiteOfFastFail = " + exclusivelyRetringInspiteOfFastFail.get(); } FailureInfo(long firstFailureTime) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java index 6b0e79096fc9..aa701c848d7a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
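The Durability Javadoc reformatted above spells out the WAL guarantee behind each level. A small sketch of picking a level per mutation with the existing Put#setDurability API (row, family and value are placeholders):

import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class DurabilitySketch {
  static Put syncWalPut() {
    Put put = new Put(Bytes.toBytes("row1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // SYNC_WAL flushes the edit to the filesystem; FSYNC_WAL additionally forces it to disk,
    // while USE_DEFAULT defers to the table's configured durability.
    put.setDurability(Durability.SYNC_WAL);
    return put;
  }
}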
See the NOTICE file * distributed with this work for additional information @@ -78,8 +78,7 @@ public boolean isRetryDespiteFastFailMode() { return retryDespiteFastFailMode; } - public void setCouldNotCommunicateWithServer( - MutableBoolean couldNotCommunicateWithServer) { + public void setCouldNotCommunicateWithServer(MutableBoolean couldNotCommunicateWithServer) { this.couldNotCommunicateWithServer = couldNotCommunicateWithServer; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java index b0eea86a8bc7..ff2abb9354e9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FlushRegionCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.HBaseRpcController; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; @@ -28,6 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse; @@ -54,7 +53,7 @@ public FlushRegionCallable(ClusterConnection connection, RpcControllerFactory rpcControllerFactory, RegionInfo regionInfo, boolean writeFlushWalMarker) { this(connection, rpcControllerFactory, regionInfo.getTable(), regionInfo.getRegionName(), - regionInfo.getStartKey(), writeFlushWalMarker); + regionInfo.getStartKey(), writeFlushWalMarker); } @Override @@ -74,11 +73,8 @@ protected FlushRegionResponse call(HBaseRpcController controller) throws Excepti LOG.info("Skipping flush region, because the located region " + Bytes.toStringBinary(location.getRegionInfo().getRegionName()) + " is different than " + " requested region " + Bytes.toStringBinary(regionName)); - return FlushRegionResponse.newBuilder() - .setLastFlushTime(EnvironmentEdgeManager.currentTime()) - .setFlushed(false) - .setWroteFlushWalMarker(false) - .build(); + return FlushRegionResponse.newBuilder().setLastFlushTime(EnvironmentEdgeManager.currentTime()) + .setFlushed(false).setWroteFlushWalMarker(false).build(); } FlushRegionRequest request = diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java index a671b9f6b269..5d400c230cbd 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +17,6 @@ */ package org.apache.hadoop.hbase.client; - import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -29,34 +27,33 @@ import java.util.Set; import java.util.TreeMap; import java.util.TreeSet; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Used to perform Get operations on a single row. *

    - * To get everything for a row, instantiate a Get object with the row to get. - * To further narrow the scope of what to Get, use the methods below. + * To get everything for a row, instantiate a Get object with the row to get. To further narrow the + * scope of what to Get, use the methods below. *

    - * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} - * for each family to retrieve. + * To get all columns from specific families, execute {@link #addFamily(byte[]) addFamily} for each + * family to retrieve. *

    - * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} - * for each column to retrieve. + * To get specific columns, execute {@link #addColumn(byte[], byte[]) addColumn} for each column to + * retrieve. *

    - * To only retrieve columns within a specific range of version timestamps, - * execute {@link #setTimeRange(long, long) setTimeRange}. + * To only retrieve columns within a specific range of version timestamps, execute + * {@link #setTimeRange(long, long) setTimeRange}. *

    - * To only retrieve columns with a specific timestamp, execute - * {@link #setTimestamp(long) setTimestamp}. + * To only retrieve columns with a specific timestamp, execute {@link #setTimestamp(long) + * setTimestamp}. *

    * To limit the number of versions of each column to be returned, execute * {@link #setMaxVersions(int) setMaxVersions}. @@ -67,7 +64,7 @@ public class Get extends Query implements Row { private static final Logger LOG = LoggerFactory.getLogger(Get.class); - private byte [] row = null; + private byte[] row = null; private int maxVersions = 1; private boolean cacheBlocks = true; private int storeLimit = -1; @@ -75,23 +72,22 @@ public class Get extends Query implements Row { private TimeRange tr = TimeRange.allTime(); private boolean checkExistenceOnly = false; private boolean closestRowBefore = false; - private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map> familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** * Create a Get operation for the specified row. *
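The Get Javadoc reflowed above lists addFamily, addColumn, setTimeRange and the per-column version limit. A usage sketch with placeholder names; readVersions is used here as the non-deprecated counterpart of the setMaxVersions the Javadoc mentions:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetUsageSketch {
  // Assumes an already-open Table; cf1/cf2 and q are hypothetical family/qualifier names.
  static void getExamples(Table table) throws IOException {
    Get get = new Get(Bytes.toBytes("row1"));
    get.addFamily(Bytes.toBytes("cf1"));                     // all columns of cf1
    get.addColumn(Bytes.toBytes("cf2"), Bytes.toBytes("q")); // just cf2:q
    get.setTimeRange(0L, 1234567890L);                       // versions with timestamp in [0, 1234567890)
    get.readVersions(3);                                     // up to 3 versions per column
    Result result = table.get(get);
    byte[] value = result.getValue(Bytes.toBytes("cf2"), Bytes.toBytes("q"));
    System.out.println(value == null ? "missing" : Bytes.toStringBinary(value));
  }
}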

    - * If no further operations are done, this will get the latest version of - * all columns in all families of the specified row. + * If no further operations are done, this will get the latest version of all columns in all + * families of the specified row. * @param row row key */ - public Get(byte [] row) { + public Get(byte[] row) { Mutation.checkRow(row); this.row = row; } /** * Copy-constructor - * * @param get */ public Get(Get get) { @@ -109,8 +105,8 @@ public Get(Get get) { this.checkExistenceOnly = get.isCheckExistenceOnly(); this.loadColumnFamiliesOnDemand = get.getLoadColumnFamiliesOnDemandValue(); Map> fams = get.getFamilyMap(); - for (Map.Entry> entry : fams.entrySet()) { - byte [] fam = entry.getKey(); + for (Map.Entry> entry : fams.entrySet()) { + byte[] fam = entry.getKey(); NavigableSet cols = entry.getValue(); if (cols != null && cols.size() > 0) { for (byte[] col : cols) { @@ -187,7 +183,7 @@ public Get setClosestRowBefore(boolean closestRowBefore) { * @param family family name * @return the Get object */ - public Get addFamily(byte [] family) { + public Get addFamily(byte[] family) { familyMap.remove(family); familyMap.put(family, null); return this; @@ -201,9 +197,9 @@ public Get addFamily(byte [] family) { * @param qualifier column qualifier * @return the Get objec */ - public Get addColumn(byte [] family, byte [] qualifier) { - NavigableSet set = familyMap.get(family); - if(set == null) { + public Get addColumn(byte[] family, byte[] qualifier) { + NavigableSet set = familyMap.get(family); + if (set == null) { set = new TreeSet<>(Bytes.BYTES_COMPARATOR); familyMap.put(family, set); } @@ -215,8 +211,7 @@ public Get addColumn(byte [] family, byte [] qualifier) { } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp). + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @throws IOException @@ -231,8 +226,8 @@ public Get setTimeRange(long minStamp, long maxStamp) throws IOException { * Get versions of columns with the specified timestamp. * @param timestamp version timestamp * @return this for invocation chaining - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #setTimestamp(long)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #setTimestamp(long)} instead */ @Deprecated public Get setTimeStamp(long timestamp) throws IOException { @@ -247,7 +242,7 @@ public Get setTimeStamp(long timestamp) throws IOException { public Get setTimestamp(long timestamp) { try { tr = new TimeRange(timestamp, timestamp + 1); - } catch(Exception e) { + } catch (Exception e) { // This should never happen, unless integer overflow or something extremely wrong... LOG.error("TimeRange failed, likely caused by integer overflow. ", e); throw e; @@ -256,7 +251,8 @@ public Get setTimestamp(long timestamp) { return this; } - @Override public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { + @Override + public Get setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { return (Get) super.setColumnFamilyTimeRange(cf, minStamp, maxStamp); } @@ -344,12 +340,9 @@ public Get setFilter(Filter filter) { /** * Set whether blocks should be cached for this Get. *

    - * This is true by default. When true, default settings of the table and - * family are used (this will never override caching blocks if the block - * cache is disabled for that family or entirely). - * - * @param cacheBlocks if false, default settings are overridden and blocks - * will not be cached + * This is true by default. When true, default settings of the table and family are used (this + * will never override caching blocks if the block cache is disabled for that family or entirely). + * @param cacheBlocks if false, default settings are overridden and blocks will not be cached */ public Get setCacheBlocks(boolean cacheBlocks) { this.cacheBlocks = cacheBlocks; @@ -358,8 +351,7 @@ public Get setCacheBlocks(boolean cacheBlocks) { /** * Get whether blocks should be cached for this Get. - * @return true if default caching should be used, false if blocks should not - * be cached + * @return true if default caching should be used, false if blocks should not be cached */ public boolean getCacheBlocks() { return cacheBlocks; @@ -370,7 +362,7 @@ public boolean getCacheBlocks() { * @return row */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } @@ -383,8 +375,7 @@ public int getMaxVersions() { } /** - * Method for retrieving the get's maximum number of values - * to return per Column Family + * Method for retrieving the get's maximum number of values to return per Column Family * @return the maximum number of values to fetch per CF */ public int getMaxResultsPerColumnFamily() { @@ -392,8 +383,7 @@ public int getMaxResultsPerColumnFamily() { } /** - * Method for retrieving the get's offset per row per column - * family (#kvs to be skipped) + * Method for retrieving the get's offset per row per column family (#kvs to be skipped) * @return the row offset */ public int getRowOffsetPerColumnFamily() { @@ -436,14 +426,13 @@ public boolean hasFamilies() { * Method for retrieving the get's familyMap * @return familyMap */ - public Map> getFamilyMap() { + public Map> getFamilyMap() { return this.familyMap; } /** - * Compile the table and column family (i.e. schema) information - * into a String. Useful for parsing and aggregation by debugging, - * logging, and administration tools. + * Compile the table and column family (i.e. schema) information into a String. Useful for parsing + * and aggregation by debugging, logging, and administration tools. * @return Map */ @Override @@ -451,17 +440,16 @@ public Map getFingerprint() { Map map = new HashMap<>(); List families = new ArrayList<>(this.familyMap.entrySet().size()); map.put("families", families); - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. 
* @param maxCols a limit on the number of columns output prior to truncation * @return Map */ @@ -483,11 +471,10 @@ public Map toMap(int maxCols) { map.put("timeRange", timeRange); int colCount = 0; // iterate through affected families and add details - for (Map.Entry> entry : - this.familyMap.entrySet()) { + for (Map.Entry> entry : this.familyMap.entrySet()) { List familyList = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), familyList); - if(entry.getValue() == null) { + if (entry.getValue() == null) { colCount++; --maxCols; familyList.add("ALL"); @@ -496,7 +483,7 @@ public Map toMap(int maxCols) { if (maxCols <= 0) { continue; } - for (byte [] column : entry.getValue()) { + for (byte[] column : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -515,16 +502,16 @@ public Map toMap(int maxCols) { return map; } - //Row + // Row @Override public int compareTo(Row other) { - // TODO: This is wrong. Can't have two gets the same just because on same row. + // TODO: This is wrong. Can't have two gets the same just because on same row. return Bytes.compareTo(this.getRow(), other.getRow()); } @Override public int hashCode() { - // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // TODO: This is wrong. Can't have two gets the same just because on same row. But it // matches how equals works currently and gets rid of the findbugs warning. return Bytes.hashCode(this.getRow()); } @@ -538,7 +525,7 @@ public boolean equals(Object obj) { return false; } Row other = (Row) obj; - // TODO: This is wrong. Can't have two gets the same just because on same row. + // TODO: This is wrong. Can't have two gets the same just because on same row. return compareTo(other) == 0; } @@ -579,7 +566,7 @@ public Get setReplicaId(int Id) { @Override public Get setIsolationLevel(IsolationLevel level) { - return (Get) super.setIsolationLevel(level); + return (Get) super.setIsolationLevel(level); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 6cf018468ebb..5c16a7c99a80 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -237,17 +237,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos; /** - * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that - * this is an HBase-internal class as defined in + * HBaseAdmin is no longer a client API. It is marked InterfaceAudience.Private indicating that this + * is an HBase-internal class as defined in * https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/InterfaceClassification.html * There are no guarantees for backwards source / binary compatibility and methods or class can - * change or go away without deprecation. - * Use {@link Connection#getAdmin()} to obtain an instance of {@link Admin} instead of constructing - * an HBaseAdmin directly. - * - *

    Connection should be an unmanaged connection obtained via + * change or go away without deprecation. Use {@link Connection#getAdmin()} to obtain an instance of + * {@link Admin} instead of constructing an HBaseAdmin directly. + *

    + * Connection should be an unmanaged connection obtained via * {@link ConnectionFactory#createConnection(Configuration)} - * * @see ConnectionFactory * @see Connection * @see Admin @@ -287,16 +285,15 @@ public int getSyncWaitTimeout() { this.connection = connection; // TODO: receive ConnectionConfiguration here rather than re-parsing these configs every time. - this.pause = this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, - HConstants.DEFAULT_HBASE_CLIENT_PAUSE); + this.pause = + this.conf.getLong(HConstants.HBASE_CLIENT_PAUSE, HConstants.DEFAULT_HBASE_CLIENT_PAUSE); this.numRetries = this.conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER); this.operationTimeout = this.conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); - this.rpcTimeout = this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); - this.syncWaitTimeout = this.conf.getInt( - "hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + this.rpcTimeout = + this.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.syncWaitTimeout = this.conf.getInt("hbase.client.sync.wait.timeout.msec", 10 * 60000); // 10min this.getProcedureTimeout = this.conf.getInt("hbase.client.procedure.future.get.timeout.msec", 10 * 60000); // 10min @@ -320,7 +317,7 @@ public boolean isAborted() { @Override public boolean abortProcedure(final long procId, final boolean mayInterruptIfRunning) - throws IOException { + throws IOException { return get(abortProcedureAsync(procId, mayInterruptIfRunning), this.syncWaitTimeout, TimeUnit.MILLISECONDS); } @@ -328,44 +325,43 @@ public boolean abortProcedure(final long procId, final boolean mayInterruptIfRun @Override public Future abortProcedureAsync(final long procId, final boolean mayInterruptIfRunning) throws IOException { - Boolean abortProcResponse = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - @Override - protected AbortProcedureResponse rpcCall() throws Exception { - AbortProcedureRequest abortProcRequest = - AbortProcedureRequest.newBuilder().setProcId(procId).build(); - return master.abortProcedure(getRpcController(), abortProcRequest); - } - }).getIsProcedureAborted(); + Boolean abortProcResponse = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + @Override + protected AbortProcedureResponse rpcCall() throws Exception { + AbortProcedureRequest abortProcRequest = + AbortProcedureRequest.newBuilder().setProcId(procId).build(); + return master.abortProcedure(getRpcController(), abortProcRequest); + } + }).getIsProcedureAborted(); return new AbortProcedureFuture(this, procId, abortProcResponse); } @Override public List listTableDescriptors() throws IOException { - return listTableDescriptors((Pattern)null, false); + return listTableDescriptors((Pattern) null, false); } @Override public List listTableDescriptors(Pattern pattern, boolean includeSysTables) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); - return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), - req)); - } - }); + 
return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); + return ProtobufUtil + .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)); + } + }); } @Override public TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException { return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, - operationTimeout, rpcTimeout); + operationTimeout, rpcTimeout); } @Override @@ -379,39 +375,38 @@ public Future modifyTableAsync(TableDescriptor td) throws IOException { protected ModifyTableResponse rpcCall() throws Exception { setPriority(td.getTableName()); ModifyTableRequest request = - RequestConverter.buildModifyTableRequest(td.getTableName(), td, nonceGroup, nonce); + RequestConverter.buildModifyTableRequest(td.getTableName(), td, nonceGroup, nonce); return master.modifyTable(getRpcController(), request); } }); return new ModifyTableFuture(this, td.getTableName(), response); } - @Override public Future modifyTableStoreFileTrackerAsync(TableName tableName, String dstSFT) - throws IOException { + throws IOException { ModifyTableStoreFileTrackerResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - long nonceGroup = ng.getNonceGroup(); - long nonce = ng.newNonce(); + executeCallable(new MasterCallable(getConnection(), + getRpcControllerFactory()) { + long nonceGroup = ng.getNonceGroup(); + long nonce = ng.newNonce(); - @Override - protected ModifyTableStoreFileTrackerResponse rpcCall() throws Exception { - setPriority(tableName); - ModifyTableStoreFileTrackerRequest request = RequestConverter - .buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, nonceGroup, nonce); - return master.modifyTableStoreFileTracker(getRpcController(), request); - } - }); + @Override + protected ModifyTableStoreFileTrackerResponse rpcCall() throws Exception { + setPriority(tableName); + ModifyTableStoreFileTrackerRequest request = RequestConverter + .buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, nonceGroup, nonce); + return master.modifyTableStoreFileTracker(getRpcController(), request); + } + }); return new ModifyTablerStoreFileTrackerFuture(this, tableName, response); } private static class ModifyTablerStoreFileTrackerFuture extends ModifyTableFuture { public ModifyTablerStoreFileTrackerFuture(HBaseAdmin admin, TableName tableName, - ModifyTableStoreFileTrackerResponse response) { + ModifyTableStoreFileTrackerResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? 
response.getProcId() : null); } @Override @@ -422,33 +417,32 @@ public String getOperationType() { @Override public List listTableDescriptorsByNamespace(byte[] name) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - return master.listTableDescriptorsByNamespace(getRpcController(), + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + return master + .listTableDescriptorsByNamespace(getRpcController(), ListTableDescriptorsByNamespaceRequest.newBuilder() - .setNamespaceName(Bytes.toString(name)).build()) - .getTableSchemaList() - .stream() - .map(ProtobufUtil::toTableDescriptor) - .collect(Collectors.toList()); - } - }); + .setNamespaceName(Bytes.toString(name)).build()) + .getTableSchemaList().stream().map(ProtobufUtil::toTableDescriptor) + .collect(Collectors.toList()); + } + }); } @Override public List listTableDescriptors(List tableNames) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected List rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableNames); - return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), - req)); - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected List rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableNames); + return ProtobufUtil + .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)); + } + }); } @Override @@ -471,9 +465,7 @@ public List getRegions(TableName tableName) throws IOException { private static class AbortProcedureFuture extends ProcedureFuture { private boolean isAbortInProgress; - public AbortProcedureFuture( - final HBaseAdmin admin, - final Long procId, + public AbortProcedureFuture(final HBaseAdmin admin, final Long procId, final Boolean abortProcResponse) { super(admin, procId); this.isAbortInProgress = abortProcResponse; @@ -508,7 +500,7 @@ protected Boolean rpcCall(int callTimeout) throws Exception { @Override public HTableDescriptor[] listTables() throws IOException { - return listTables((Pattern)null, false); + return listTables((Pattern) null, false); } @Override @@ -524,27 +516,27 @@ public HTableDescriptor[] listTables(String regex) throws IOException { @Override public HTableDescriptor[] listTables(final Pattern pattern, final boolean includeSysTables) throws IOException { - return executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - @Override - protected HTableDescriptor[] rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); - return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), - req)).stream().map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); - } - }); + return executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + @Override + protected HTableDescriptor[] rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables); + return ProtobufUtil + 
.toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream() + .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new); + } + }); } @Override - public HTableDescriptor[] listTables(String regex, boolean includeSysTables) - throws IOException { + public HTableDescriptor[] listTables(String regex, boolean includeSysTables) throws IOException { return listTables(Pattern.compile(regex), includeSysTables); } @Override public TableName[] listTableNames() throws IOException { - return listTableNames((Pattern)null, false); + return listTableNames((Pattern) null, false); } @Override @@ -555,16 +547,16 @@ public TableName[] listTableNames(String regex) throws IOException { @Override public TableName[] listTableNames(final Pattern pattern, final boolean includeSysTables) throws IOException { - return executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - @Override - protected TableName[] rpcCall() throws Exception { - GetTableNamesRequest req = - RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables); - return ProtobufUtil.getTableNameArray(master.getTableNames(getRpcController(), req) - .getTableNamesList()); - } - }); + return executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + @Override + protected TableName[] rpcCall() throws Exception { + GetTableNamesRequest req = + RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables); + return ProtobufUtil + .getTableNameArray(master.getTableNames(getRpcController(), req).getTableNamesList()); + } + }); } @Override @@ -576,7 +568,7 @@ public TableName[] listTableNames(final String regex, final boolean includeSysTa @Override public HTableDescriptor getTableDescriptor(final TableName tableName) throws IOException { return getHTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory, - operationTimeout, rpcTimeout); + operationTimeout, rpcTimeout); } static TableDescriptor getTableDescriptor(final TableName tableName, Connection connection, @@ -585,17 +577,17 @@ static TableDescriptor getTableDescriptor(final TableName tableName, Connection if (tableName == null) return null; TableDescriptor td = executeCallable(new MasterCallable(connection, rpcControllerFactory) { - @Override - protected TableDescriptor rpcCall() throws Exception { - GetTableDescriptorsRequest req = - RequestConverter.buildGetTableDescriptorsRequest(tableName); - GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); - if (!htds.getTableSchemaList().isEmpty()) { - return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); - } - return null; - } - }, rpcCallerFactory, operationTimeout, rpcTimeout); + @Override + protected TableDescriptor rpcCall() throws Exception { + GetTableDescriptorsRequest req = + RequestConverter.buildGetTableDescriptorsRequest(tableName); + GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req); + if (!htds.getTableSchemaList().isEmpty()) { + return ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)); + } + return null; + } + }, rpcCallerFactory, operationTimeout, rpcTimeout); if (td != null) { return td; } @@ -603,9 +595,8 @@ protected TableDescriptor rpcCall() throws Exception { } /** - * @deprecated since 2.0 version and will be removed in 3.0 version. 
- * use {@link #getTableDescriptor(TableName, - * Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} + * @deprecated since 2.0 version and will be removed in 3.0 version. use + * {@link #getTableDescriptor(TableName, Connection, RpcRetryingCallerFactory,RpcControllerFactory,int,int)} */ @Deprecated static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection, @@ -677,9 +668,8 @@ public Future createTableAsync(final TableDescriptor desc, final byte[][] "Empty split key must not be passed in the split keys."); } if (lastKey != null && Bytes.equals(splitKey, lastKey)) { - throw new IllegalArgumentException("All split keys must be unique, " + - "found duplicate: " + Bytes.toStringBinary(splitKey) + - ", " + Bytes.toStringBinary(lastKey)); + throw new IllegalArgumentException("All split keys must be unique, " + "found duplicate: " + + Bytes.toStringBinary(splitKey) + ", " + Bytes.toStringBinary(lastKey)); } lastKey = splitKey; } @@ -689,11 +679,12 @@ public Future createTableAsync(final TableDescriptor desc, final byte[][] new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected CreateTableResponse rpcCall() throws Exception { setPriority(desc.getTableName()); - CreateTableRequest request = RequestConverter.buildCreateTableRequest( - desc, splitKeys, nonceGroup, nonce); + CreateTableRequest request = + RequestConverter.buildCreateTableRequest(desc, splitKeys, nonceGroup, nonce); return master.createTable(getRpcController(), request); } }); @@ -707,7 +698,7 @@ private static class CreateTableFuture extends TableFuture { public CreateTableFuture(final HBaseAdmin admin, final TableDescriptor desc, final byte[][] splitKeys, final CreateTableResponse response) { super(admin, desc.getTableName(), - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); this.splitKeys = splitKeys; this.desc = desc; } @@ -736,11 +727,12 @@ public Future deleteTableAsync(final TableName tableName) throws IOExcepti new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected DeleteTableResponse rpcCall() throws Exception { setPriority(tableName); DeleteTableRequest req = - RequestConverter.buildDeleteTableRequest(tableName, nonceGroup,nonce); + RequestConverter.buildDeleteTableRequest(tableName, nonceGroup, nonce); return master.deleteTable(getRpcController(), req); } }); @@ -751,7 +743,7 @@ private static class DeleteTableFuture extends TableFuture { public DeleteTableFuture(final HBaseAdmin admin, final TableName tableName, final DeleteTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -760,8 +752,7 @@ public String getOperationType() { } @Override - protected Void waitOperationResult(final long deadlineTs) - throws IOException, TimeoutException { + protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { waitTableNotFound(deadlineTs); return null; } @@ -781,12 +772,9 @@ public HTableDescriptor[] deleteTables(String regex) throws IOException { } /** - * Delete tables matching the passed in pattern and wait on completion. 
- * - * Warning: Use this method carefully, there is no prompting and the effect is - * immediate. Consider using {@link #listTables(java.util.regex.Pattern) } and - * {@link #deleteTable(TableName)} - * + * Delete tables matching the passed in pattern and wait on completion. Warning: Use this method + * carefully, there is no prompting and the effect is immediate. Consider using + * {@link #listTables(java.util.regex.Pattern) } and {@link #deleteTable(TableName)} * @param pattern The pattern to match table names against * @return Table descriptors for tables that couldn't be deleted * @throws IOException if a remote or network exception occurs @@ -808,20 +796,20 @@ public HTableDescriptor[] deleteTables(Pattern pattern) throws IOException { @Override public Future truncateTableAsync(final TableName tableName, final boolean preserveSplits) throws IOException { - TruncateTableResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected TruncateTableResponse rpcCall() throws Exception { - setPriority(tableName); - LOG.info("Started truncating " + tableName); - TruncateTableRequest req = RequestConverter.buildTruncateTableRequest( - tableName, preserveSplits, nonceGroup, nonce); - return master.truncateTable(getRpcController(), req); - } - }); + TruncateTableResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected TruncateTableResponse rpcCall() throws Exception { + setPriority(tableName); + LOG.info("Started truncating " + tableName); + TruncateTableRequest req = RequestConverter.buildTruncateTableRequest(tableName, + preserveSplits, nonceGroup, nonce); + return master.truncateTable(getRpcController(), req); + } + }); return new TruncateTableFuture(this, tableName, preserveSplits, response); } @@ -831,7 +819,7 @@ private static class TruncateTableFuture extends TableFuture { public TruncateTableFuture(final HBaseAdmin admin, final TableName tableName, final boolean preserveSplits, final TruncateTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); this.preserveSplits = preserveSplits; } @@ -872,13 +860,14 @@ public Future enableTableAsync(final TableName tableName) throws IOExcepti new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected EnableTableResponse rpcCall() throws Exception { setPriority(tableName); LOG.info("Started enable of " + tableName); EnableTableRequest req = RequestConverter.buildEnableTableRequest(tableName, nonceGroup, nonce); - return master.enableTable(getRpcController(),req); + return master.enableTable(getRpcController(), req); } }); return new EnableTableFuture(this, tableName, response); @@ -888,7 +877,7 @@ private static class EnableTableFuture extends TableFuture { public EnableTableFuture(final HBaseAdmin admin, final TableName tableName, final EnableTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? 
response.getProcId() : null); } @Override @@ -931,13 +920,13 @@ public Future disableTableAsync(final TableName tableName) throws IOExcept new MasterCallable(getConnection(), getRpcControllerFactory()) { Long nonceGroup = ng.getNonceGroup(); Long nonce = ng.newNonce(); + @Override protected DisableTableResponse rpcCall() throws Exception { setPriority(tableName); LOG.info("Started disable of " + tableName); DisableTableRequest req = - RequestConverter.buildDisableTableRequest( - tableName, nonceGroup, nonce); + RequestConverter.buildDisableTableRequest(tableName, nonceGroup, nonce); return master.disableTable(getRpcController(), req); } }); @@ -948,7 +937,7 @@ private static class DisableTableFuture extends TableFuture { public DisableTableFuture(final HBaseAdmin admin, final TableName tableName, final DisableTableResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1017,19 +1006,19 @@ public boolean isTableAvailable(TableName tableName, byte[][] splitKeys) throws @Override public Pair getAlterStatus(final TableName tableName) throws IOException { - return executeCallable(new MasterCallable>(getConnection(), - getRpcControllerFactory()) { - @Override - protected Pair rpcCall() throws Exception { - setPriority(tableName); - GetSchemaAlterStatusRequest req = RequestConverter - .buildGetSchemaAlterStatusRequest(tableName); - GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req); - Pair pair = new Pair<>(ret.getYetToUpdateRegions(), - ret.getTotalRegions()); - return pair; - } - }); + return executeCallable( + new MasterCallable>(getConnection(), getRpcControllerFactory()) { + @Override + protected Pair rpcCall() throws Exception { + setPriority(tableName); + GetSchemaAlterStatusRequest req = + RequestConverter.buildGetSchemaAlterStatusRequest(tableName); + GetSchemaAlterStatusResponse ret = master.getSchemaAlterStatus(getRpcController(), req); + Pair pair = + new Pair<>(ret.getYetToUpdateRegions(), ret.getTotalRegions()); + return pair; + } + }); } @Override @@ -1040,27 +1029,27 @@ public Pair getAlterStatus(final byte[] tableName) throws IOEx @Override public Future addColumnFamilyAsync(final TableName tableName, final ColumnFamilyDescriptor columnFamily) throws IOException { - AddColumnResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected AddColumnResponse rpcCall() throws Exception { - setPriority(tableName); - AddColumnRequest req = - RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce); - return master.addColumn(getRpcController(), req); - } - }); + AddColumnResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected AddColumnResponse rpcCall() throws Exception { + setPriority(tableName); + AddColumnRequest req = + RequestConverter.buildAddColumnRequest(tableName, columnFamily, nonceGroup, nonce); + return master.addColumn(getRpcController(), req); + } + }); return new AddColumnFamilyFuture(this, tableName, response); } private static class AddColumnFamilyFuture extends ModifyTableFuture { public AddColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, final 
AddColumnResponse response) { - super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() - : null); + super(admin, tableName, + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1072,7 +1061,7 @@ public String getOperationType() { /** * {@inheritDoc} * @deprecated Since 2.0. Will be removed in 3.0. Use - * {@link #deleteColumnFamily(TableName, byte[])} instead. + * {@link #deleteColumnFamily(TableName, byte[])} instead. */ @Override @Deprecated @@ -1084,28 +1073,27 @@ public void deleteColumn(final TableName tableName, final byte[] columnFamily) @Override public Future deleteColumnFamilyAsync(final TableName tableName, final byte[] columnFamily) throws IOException { - DeleteColumnResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected DeleteColumnResponse rpcCall() throws Exception { - setPriority(tableName); - DeleteColumnRequest req = - RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, - nonceGroup, nonce); - return master.deleteColumn(getRpcController(), req); - } - }); + DeleteColumnResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected DeleteColumnResponse rpcCall() throws Exception { + setPriority(tableName); + DeleteColumnRequest req = + RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, nonceGroup, nonce); + return master.deleteColumn(getRpcController(), req); + } + }); return new DeleteColumnFamilyFuture(this, tableName, response); } private static class DeleteColumnFamilyFuture extends ModifyTableFuture { public DeleteColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, final DeleteColumnResponse response) { - super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() - : null); + super(admin, tableName, + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1116,7 +1104,7 @@ public String getOperationType() { @Override public Future modifyColumnFamilyAsync(final TableName tableName, - final ColumnFamilyDescriptor columnFamily) throws IOException { + final ColumnFamilyDescriptor columnFamily) throws IOException { ModifyColumnResponse response = executeCallable( new MasterCallable(getConnection(), getRpcControllerFactory()) { long nonceGroup = ng.getNonceGroup(); @@ -1126,7 +1114,7 @@ public Future modifyColumnFamilyAsync(final TableName tableName, protected ModifyColumnResponse rpcCall() throws Exception { setPriority(tableName); ModifyColumnRequest req = - RequestConverter.buildModifyColumnRequest(tableName, columnFamily, nonceGroup, nonce); + RequestConverter.buildModifyColumnRequest(tableName, columnFamily, nonceGroup, nonce); return master.modifyColumn(getRpcController(), req); } }); @@ -1136,8 +1124,8 @@ protected ModifyColumnResponse rpcCall() throws Exception { private static class ModifyColumnFamilyFuture extends ModifyTableFuture { public ModifyColumnFamilyFuture(final HBaseAdmin admin, final TableName tableName, final ModifyColumnResponse response) { - super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() - : null); + super(admin, tableName, + (response != null && response.hasProcId()) ? 
response.getProcId() : null); } @Override @@ -1148,29 +1136,30 @@ public String getOperationType() { @Override public Future modifyColumnFamilyStoreFileTrackerAsync(TableName tableName, byte[] family, - String dstSFT) throws IOException { + String dstSFT) throws IOException { ModifyColumnStoreFileTrackerResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - long nonceGroup = ng.getNonceGroup(); - long nonce = ng.newNonce(); + executeCallable(new MasterCallable(getConnection(), + getRpcControllerFactory()) { + long nonceGroup = ng.getNonceGroup(); + long nonce = ng.newNonce(); - @Override - protected ModifyColumnStoreFileTrackerResponse rpcCall() throws Exception { - setPriority(tableName); - ModifyColumnStoreFileTrackerRequest req = RequestConverter - .buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, nonceGroup, nonce); - return master.modifyColumnStoreFileTracker(getRpcController(), req); - } - }); + @Override + protected ModifyColumnStoreFileTrackerResponse rpcCall() throws Exception { + setPriority(tableName); + ModifyColumnStoreFileTrackerRequest req = + RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, + nonceGroup, nonce); + return master.modifyColumnStoreFileTracker(getRpcController(), req); + } + }); return new ModifyColumnFamilyStoreFileTrackerFuture(this, tableName, response); } private static class ModifyColumnFamilyStoreFileTrackerFuture extends ModifyTableFuture { public ModifyColumnFamilyStoreFileTrackerFuture(HBaseAdmin admin, TableName tableName, - final ModifyColumnStoreFileTrackerResponse response) { + final ModifyColumnStoreFileTrackerResponse response) { super(admin, tableName, - (response != null && response.hasProcId()) ? response.getProcId() : null); + (response != null && response.hasProcId()) ? response.getProcId() : null); } @Override @@ -1187,7 +1176,7 @@ public void closeRegion(final String regionName, final String unused) throws IOE @Deprecated @Override - public void closeRegion(final byte [] regionName, final String unused) throws IOException { + public void closeRegion(final byte[] regionName, final String unused) throws IOException { unassign(regionName, true); } @@ -1209,8 +1198,8 @@ public void closeRegion(final ServerName unused, final HRegionInfo hri) throws I * @param sn * @return List of {@link HRegionInfo}. * @throws IOException if a remote or network exception occurs - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegions(ServerName)}. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use + * {@link #getRegions(ServerName)}. */ @Deprecated @Override @@ -1257,13 +1246,12 @@ public void flushRegion(final byte[] regionName, byte[] columnFamily) throws IOE } private void flush(AdminService.BlockingInterface admin, final RegionInfo info, - byte[] columnFamily) - throws IOException { + byte[] columnFamily) throws IOException { ProtobufUtil.call(() -> { // TODO: There is no timeout on this controller. Set one! 
HBaseRpcController controller = rpcControllerFactory.newController(); FlushRegionRequest request = - RequestConverter.buildFlushRegionRequest(info.getRegionName(), columnFamily, false); + RequestConverter.buildFlushRegionRequest(info.getRegionName(), columnFamily, false); admin.flushRegion(controller, request); return null; }); @@ -1280,14 +1268,12 @@ public void flushRegionServer(ServerName serverName) throws IOException { * {@inheritDoc} */ @Override - public void compact(final TableName tableName) - throws IOException { + public void compact(final TableName tableName) throws IOException { compact(tableName, null, false, CompactType.NORMAL); } @Override - public void compactRegion(final byte[] regionName) - throws IOException { + public void compactRegion(final byte[] regionName) throws IOException { compactRegion(regionName, null, false); } @@ -1295,8 +1281,7 @@ public void compactRegion(final byte[] regionName) * {@inheritDoc} */ @Override - public void compact(final TableName tableName, final byte[] columnFamily) - throws IOException { + public void compact(final TableName tableName, final byte[] columnFamily) throws IOException { compact(tableName, columnFamily, false, CompactType.NORMAL); } @@ -1304,36 +1289,35 @@ public void compact(final TableName tableName, final byte[] columnFamily) * {@inheritDoc} */ @Override - public void compactRegion(final byte[] regionName, final byte[] columnFamily) - throws IOException { + public void compactRegion(final byte[] regionName, final byte[] columnFamily) throws IOException { compactRegion(regionName, columnFamily, false); } @Override - public Map compactionSwitch(boolean switchState, List - serverNamesList) throws IOException { + public Map compactionSwitch(boolean switchState, + List serverNamesList) throws IOException { List serverList = new ArrayList<>(); if (serverNamesList.isEmpty()) { ClusterMetrics status = getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS)); serverList.addAll(status.getLiveServerMetrics().keySet()); } else { - for (String regionServerName: serverNamesList) { + for (String regionServerName : serverNamesList) { ServerName serverName = null; try { serverName = ServerName.valueOf(regionServerName); } catch (Exception e) { - throw new IllegalArgumentException(String.format("Invalid ServerName format: %s", - regionServerName)); + throw new IllegalArgumentException( + String.format("Invalid ServerName format: %s", regionServerName)); } if (serverName == null) { - throw new IllegalArgumentException(String.format("Null ServerName: %s", - regionServerName)); + throw new IllegalArgumentException( + String.format("Null ServerName: %s", regionServerName)); } serverList.add(serverName); } } Map res = new HashMap<>(serverList.size()); - for (ServerName serverName: serverList) { + for (ServerName serverName : serverList) { boolean prev_state = switchCompact(this.connection.getAdmin(serverName), switchState); res.put(serverName, prev_state); } @@ -1343,7 +1327,8 @@ public Map compactionSwitch(boolean switchState, List() { - @Override protected Boolean rpcCall(int callTimeout) throws Exception { + @Override + protected Boolean rpcCall(int callTimeout) throws Exception { HBaseRpcController controller = rpcControllerFactory.newController(); CompactionSwitchRequest request = CompactionSwitchRequest.newBuilder().setEnabled(onOrOff).build(); @@ -1369,14 +1354,12 @@ public void majorCompactRegionServer(final ServerName serverName) throws IOExcep } @Override - public void majorCompact(final TableName tableName) - throws IOException { + public 
void majorCompact(final TableName tableName) throws IOException { compact(tableName, null, true, CompactType.NORMAL); } @Override - public void majorCompactRegion(final byte[] regionName) - throws IOException { + public void majorCompactRegion(final byte[] regionName) throws IOException { compactRegion(regionName, null, true); } @@ -1385,36 +1368,34 @@ public void majorCompactRegion(final byte[] regionName) */ @Override public void majorCompact(final TableName tableName, final byte[] columnFamily) - throws IOException { + throws IOException { compact(tableName, columnFamily, true, CompactType.NORMAL); } @Override public void majorCompactRegion(final byte[] regionName, final byte[] columnFamily) - throws IOException { + throws IOException { compactRegion(regionName, columnFamily, true); } /** - * Compact a table. - * Asynchronous operation. - * + * Compact a table. Asynchronous operation. * @param tableName table or region to compact * @param columnFamily column family within a table or region * @param major True if we are to do a major compaction. * @param compactType {@link org.apache.hadoop.hbase.client.CompactType} * @throws IOException if a remote or network exception occurs */ - private void compact(final TableName tableName, final byte[] columnFamily,final boolean major, - CompactType compactType) throws IOException { + private void compact(final TableName tableName, final byte[] columnFamily, final boolean major, + CompactType compactType) throws IOException { switch (compactType) { case MOB: compact(this.connection.getAdminForMaster(), RegionInfo.createMobRegionInfo(tableName), - major, columnFamily); + major, columnFamily); break; case NORMAL: checkTableExists(tableName); - for (HRegionLocation loc :connection.locateRegions(tableName, false, false)) { + for (HRegionLocation loc : connection.locateRegions(tableName, false, false)) { ServerName sn = loc.getServerName(); if (sn == null) { continue; @@ -1423,8 +1404,8 @@ private void compact(final TableName tableName, final byte[] columnFamily,final compact(this.connection.getAdmin(sn), loc.getRegion(), major, columnFamily); } catch (NotServingRegionException e) { if (LOG.isDebugEnabled()) { - LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + - ": " + StringUtils.stringifyException(e)); + LOG.debug("Trying to" + (major ? " major" : "") + " compact " + loc.getRegion() + ": " + + StringUtils.stringifyException(e)); } } } @@ -1435,9 +1416,7 @@ private void compact(final TableName tableName, final byte[] columnFamily,final } /** - * Compact an individual region. - * Asynchronous operation. - * + * Compact an individual region. Asynchronous operation. * @param regionName region to compact * @param columnFamily column family within a table or region * @param major True if we are to do a major compaction. 
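The hunks above only rewrap HBaseAdmin's flush and compaction entry points (compact, majorCompact, compactRegion, compactionSwitch); their behaviour is unchanged. For orientation, here is a minimal sketch of how these asynchronous compaction requests are typically issued through the public Admin interface; the table name and column family are placeholders, not values taken from this patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactionSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("example_table"); // placeholder table name
      // Queue a minor compaction of one column family; the call returns once the
      // request has been submitted, it does not wait for the compaction to finish.
      admin.compact(table, Bytes.toBytes("cf"));
      // Queue a major compaction of the whole table, also asynchronous.
      admin.majorCompact(table);
    }
  }
}

Both public calls funnel into the private HBaseAdmin#compact(TableName, byte[], boolean, CompactType) helper that the hunks above reformat.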
@@ -1484,7 +1463,7 @@ public void move(final byte[] encodedRegionName, ServerName destServerName) thro protected Void rpcCall() throws Exception { setPriority(encodedRegionName); MoveRegionRequest request = - RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); + RequestConverter.buildMoveRegionRequest(encodedRegionName, destServerName); master.moveRegion(getRpcController(), request); return null; } @@ -1492,8 +1471,8 @@ protected Void rpcCall() throws Exception { } @Override - public void assign(final byte [] regionName) throws MasterNotRunningException, - ZooKeeperConnectionException, IOException { + public void assign(final byte[] regionName) + throws MasterNotRunningException, ZooKeeperConnectionException, IOException { executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { @@ -1507,14 +1486,13 @@ protected Void rpcCall() throws Exception { } @Override - public void unassign(final byte [] regionName) throws IOException { + public void unassign(final byte[] regionName) throws IOException { final byte[] toBeUnassigned = getRegionName(regionName); executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { setPriority(regionName); - UnassignRegionRequest request = - RequestConverter.buildUnassignRegionRequest(toBeUnassigned); + UnassignRegionRequest request = RequestConverter.buildUnassignRegionRequest(toBeUnassigned); master.unassignRegion(getRpcController(), request); return null; } @@ -1522,22 +1500,20 @@ protected Void rpcCall() throws Exception { } @Override - public void offline(final byte [] regionName) - throws IOException { + public void offline(final byte[] regionName) throws IOException { executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Void rpcCall() throws Exception { setPriority(regionName); master.offlineRegion(getRpcController(), - RequestConverter.buildOfflineRegionRequest(regionName)); + RequestConverter.buildOfflineRegionRequest(regionName)); return null; } }); } @Override - public boolean balancerSwitch(final boolean on, final boolean synchronous) - throws IOException { + public boolean balancerSwitch(final boolean on, final boolean synchronous) throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { @@ -1548,10 +1524,12 @@ protected Boolean rpcCall() throws Exception { }); } - @Override public BalanceResponse balance(BalanceRequest request) throws IOException { + @Override + public BalanceResponse balance(BalanceRequest request) throws IOException { return executeCallable( new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override protected BalanceResponse rpcCall() throws Exception { + @Override + protected BalanceResponse rpcCall() throws Exception { MasterProtos.BalanceRequest req = ProtobufUtil.toBalanceRequest(request); return ProtobufUtil.toBalanceResponse(master.balance(getRpcController(), req)); } @@ -1563,8 +1541,9 @@ public boolean isBalancerEnabled() throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - return master.isBalancerEnabled(getRpcController(), - RequestConverter.buildIsBalancerEnabledRequest()).getEnabled(); + return master + .isBalancerEnabled(getRpcController(), 
RequestConverter.buildIsBalancerEnabledRequest()) + .getEnabled(); } }); } @@ -1577,22 +1556,20 @@ public CacheEvictionStats clearBlockCache(final TableName tableName) throws IOEx checkTableExists(tableName); CacheEvictionStatsBuilder cacheEvictionStats = CacheEvictionStats.builder(); List> pairs = - MetaTableAccessor.getTableRegionsAndLocations(connection, tableName); - Map> regionInfoByServerName = - pairs.stream() - .filter(pair -> !(pair.getFirst().isOffline())) - .filter(pair -> pair.getSecond() != null) - .collect(Collectors.groupingBy(pair -> pair.getSecond(), - Collectors.mapping(pair -> pair.getFirst(), Collectors.toList()))); + MetaTableAccessor.getTableRegionsAndLocations(connection, tableName); + Map> regionInfoByServerName = pairs.stream() + .filter(pair -> !(pair.getFirst().isOffline())).filter(pair -> pair.getSecond() != null) + .collect(Collectors.groupingBy(pair -> pair.getSecond(), + Collectors.mapping(pair -> pair.getFirst(), Collectors.toList()))); for (Map.Entry> entry : regionInfoByServerName.entrySet()) { CacheEvictionStats stats = clearBlockCache(entry.getKey(), entry.getValue()); cacheEvictionStats = cacheEvictionStats.append(stats); if (stats.getExceptionCount() > 0) { for (Map.Entry exception : stats.getExceptions().entrySet()) { - LOG.debug("Failed to clear block cache for " - + Bytes.toStringBinary(exception.getKey()) - + " on " + entry.getKey() + ": ", exception.getValue()); + LOG.debug("Failed to clear block cache for " + Bytes.toStringBinary(exception.getKey()) + + " on " + entry.getKey() + ": ", + exception.getValue()); } } } @@ -1603,8 +1580,7 @@ private CacheEvictionStats clearBlockCache(final ServerName sn, final List(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - return master.normalize(getRpcController(), - RequestConverter.buildNormalizeRequest(ntfp)).getNormalizerRan(); + return master.normalize(getRpcController(), RequestConverter.buildNormalizeRequest(ntfp)) + .getNormalizerRan(); } }); } @@ -1641,8 +1617,7 @@ public boolean normalizerSwitch(final boolean on) throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Boolean rpcCall() throws Exception { - SetNormalizerRunningRequest req = - RequestConverter.buildSetNormalizerRunningRequest(on); + SetNormalizerRunningRequest req = RequestConverter.buildSetNormalizerRunningRequest(on); return master.setNormalizerRunning(getRpcController(), req).getPrevNormalizerValue(); } }); @@ -1664,8 +1639,8 @@ public int runCatalogJanitor() throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { @Override protected Integer rpcCall() throws Exception { - return master.runCatalogScan(getRpcController(), - RequestConverter.buildCatalogScanRequest()).getScanResult(); + return master.runCatalogScan(getRpcController(), RequestConverter.buildCatalogScanRequest()) + .getScanResult(); } }); } @@ -1684,9 +1659,10 @@ protected Boolean rpcCall() throws Exception { @Override public boolean cleanerChoreSwitch(final boolean on) throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override public Boolean rpcCall() throws Exception { + @Override + public Boolean rpcCall() throws Exception { return master.setCleanerChoreRunning(getRpcController(), - RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue(); + RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue(); 
} }); } @@ -1694,9 +1670,11 @@ public boolean cleanerChoreSwitch(final boolean on) throws IOException { @Override public boolean runCleanerChore() throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override public Boolean rpcCall() throws Exception { - return master.runCleanerChore(getRpcController(), - RequestConverter.buildRunCleanerChoreRequest()).getCleanerChoreRan(); + @Override + public Boolean rpcCall() throws Exception { + return master + .runCleanerChore(getRpcController(), RequestConverter.buildRunCleanerChoreRequest()) + .getCleanerChoreRan(); } }); } @@ -1704,30 +1682,26 @@ public boolean runCleanerChore() throws IOException { @Override public boolean isCleanerChoreEnabled() throws IOException { return executeCallable(new MasterCallable(getConnection(), getRpcControllerFactory()) { - @Override public Boolean rpcCall() throws Exception { + @Override + public Boolean rpcCall() throws Exception { return master.isCleanerChoreEnabled(getRpcController(), - RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue(); + RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue(); } }); } /** - * Merge two regions. Synchronous operation. - * Note: It is not feasible to predict the length of merge. - * Therefore, this is for internal testing only. + * Merge two regions. Synchronous operation. Note: It is not feasible to predict the length of + * merge. Therefore, this is for internal testing only. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge - * two adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent + * regions * @throws IOException if a remote or network exception occurs */ - public void mergeRegionsSync( - final byte[] nameOfRegionA, - final byte[] nameOfRegionB, + public void mergeRegionsSync(final byte[] nameOfRegionA, final byte[] nameOfRegionB, final boolean forcible) throws IOException { - get( - mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible), - syncWaitTimeout, + get(mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible), syncWaitTimeout, TimeUnit.MILLISECONDS); } @@ -1735,25 +1709,23 @@ public void mergeRegionsSync( * Merge two regions. Asynchronous operation. * @param nameOfRegionA encoded or full name of region a * @param nameOfRegionB encoded or full name of region b - * @param forcible true if do a compulsory merge, otherwise we will only merge - * two adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge two adjacent + * regions * @throws IOException if a remote or network exception occurs * @deprecated Since 2.0. Will be removed in 3.0. Use - * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. + * {@link #mergeRegionsAsync(byte[], byte[], boolean)} instead. */ @Deprecated @Override - public void mergeRegions(final byte[] nameOfRegionA, - final byte[] nameOfRegionB, final boolean forcible) - throws IOException { + public void mergeRegions(final byte[] nameOfRegionA, final byte[] nameOfRegionB, + final boolean forcible) throws IOException { mergeRegionsAsync(nameOfRegionA, nameOfRegionB, forcible); } /** * Merge two regions. Asynchronous operation. 
* @param nameofRegionsToMerge encoded or full name of daughter regions - * @param forcible true if do a compulsory merge, otherwise we will only merge - * adjacent regions + * @param forcible true if do a compulsory merge, otherwise we will only merge adjacent regions */ @Override public Future mergeRegionsAsync(final byte[][] nameofRegionsToMerge, final boolean forcible) @@ -1763,64 +1735,55 @@ public Future mergeRegionsAsync(final byte[][] nameofRegionsToMerge, final byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][]; for (int i = 0; i < nameofRegionsToMerge.length; i++) { encodedNameofRegionsToMerge[i] = - RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ? nameofRegionsToMerge[i] - : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i])); + RegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ? nameofRegionsToMerge[i] + : Bytes.toBytes(RegionInfo.encodeRegionName(nameofRegionsToMerge[i])); } TableName tableName = null; Pair pair; - for(int i = 0; i < nameofRegionsToMerge.length; i++) { + for (int i = 0; i < nameofRegionsToMerge.length; i++) { pair = getRegion(nameofRegionsToMerge[i]); if (pair != null) { if (pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke merge on non-default regions directly"); + throw new IllegalArgumentException("Can't invoke merge on non-default regions directly"); } if (tableName == null) { tableName = pair.getFirst().getTable(); - } else if (!tableName.equals(pair.getFirst().getTable())) { - throw new IllegalArgumentException ("Cannot merge regions from two different tables " + - tableName + " and " + pair.getFirst().getTable()); + } else if (!tableName.equals(pair.getFirst().getTable())) { + throw new IllegalArgumentException("Cannot merge regions from two different tables " + + tableName + " and " + pair.getFirst().getTable()); } } else { - throw new UnknownRegionException ( - "Can't invoke merge on unknown region " - + Bytes.toStringBinary(encodedNameofRegionsToMerge[i])); + throw new UnknownRegionException("Can't invoke merge on unknown region " + + Bytes.toStringBinary(encodedNameofRegionsToMerge[i])); } } - MergeTableRegionsResponse response = - executeCallable(new MasterCallable(getConnection(), - getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected MergeTableRegionsResponse rpcCall() throws Exception { - MergeTableRegionsRequest request = RequestConverter - .buildMergeTableRegionsRequest( - encodedNameofRegionsToMerge, - forcible, - nonceGroup, - nonce); - return master.mergeTableRegions(getRpcController(), request); - } - }); + MergeTableRegionsResponse response = executeCallable( + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected MergeTableRegionsResponse rpcCall() throws Exception { + MergeTableRegionsRequest request = RequestConverter.buildMergeTableRegionsRequest( + encodedNameofRegionsToMerge, forcible, nonceGroup, nonce); + return master.mergeTableRegions(getRpcController(), request); + } + }); return new MergeTableRegionsFuture(this, tableName, response); } private static class MergeTableRegionsFuture extends TableFuture { - public MergeTableRegionsFuture( - final HBaseAdmin admin, - final TableName tableName, + public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName, final MergeTableRegionsResponse response) { super(admin, 
tableName, (response != null && response.hasProcId()) ? response.getProcId() : null); } - public MergeTableRegionsFuture( - final HBaseAdmin admin, - final TableName tableName, + public MergeTableRegionsFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { super(admin, tableName, procId); } @@ -1830,10 +1793,10 @@ public String getOperationType() { return "MERGE_REGIONS"; } } + /** - * Split one region. Synchronous operation. - * Note: It is not feasible to predict the length of split. - * Therefore, this is for internal testing only. + * Split one region. Synchronous operation. Note: It is not feasible to predict the length of + * split. Therefore, this is for internal testing only. * @param regionName encoded or full name of region * @param splitPoint key where region splits * @throws IOException if a remote or network exception occurs @@ -1856,20 +1819,18 @@ public void splitRegionSync(byte[] regionName, byte[] splitPoint, final long tim } @Override - public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) - throws IOException { - byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ? - regionName : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName)); + public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException { + byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ? regionName + : Bytes.toBytes(HRegionInfo.encodeRegionName(regionName)); Pair pair = getRegion(regionName); if (pair != null) { - if (pair.getFirst() != null && - pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { - throw new IllegalArgumentException ("Can't invoke split on non-default regions directly"); + if (pair.getFirst() != null + && pair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { + throw new IllegalArgumentException("Can't invoke split on non-default regions directly"); } } else { - throw new UnknownRegionException ( - "Can't invoke merge on unknown region " - + Bytes.toStringBinary(encodedNameofRegionToSplit)); + throw new UnknownRegionException("Can't invoke merge on unknown region " + + Bytes.toStringBinary(encodedNameofRegionToSplit)); } return splitRegionAsync(pair.getFirst(), splitPoint); @@ -1877,37 +1838,35 @@ public Future splitRegionAsync(byte[] regionName, byte[] splitPoint) Future splitRegionAsync(RegionInfo hri, byte[] splitPoint) throws IOException { TableName tableName = hri.getTable(); - if (hri.getStartKey() != null && splitPoint != null && - Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) { + if (hri.getStartKey() != null && splitPoint != null + && Bytes.compareTo(hri.getStartKey(), splitPoint) == 0) { throw new IOException("should not give a splitkey which equals to startkey!"); } SplitTableRegionResponse response = executeCallable( - new MasterCallable(getConnection(), getRpcControllerFactory()) { - Long nonceGroup = ng.getNonceGroup(); - Long nonce = ng.newNonce(); - @Override - protected SplitTableRegionResponse rpcCall() throws Exception { - setPriority(tableName); - SplitTableRegionRequest request = RequestConverter - .buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce); - return master.splitRegion(getRpcController(), request); - } - }); + new MasterCallable(getConnection(), getRpcControllerFactory()) { + Long nonceGroup = ng.getNonceGroup(); + Long nonce = ng.newNonce(); + + @Override + protected SplitTableRegionResponse rpcCall() throws Exception { + setPriority(tableName); + SplitTableRegionRequest request = + 
RequestConverter.buildSplitTableRegionRequest(hri, splitPoint, nonceGroup, nonce); + return master.splitRegion(getRpcController(), request); + } + }); return new SplitTableRegionFuture(this, tableName, response); } private static class SplitTableRegionFuture extends TableFuture { - public SplitTableRegionFuture(final HBaseAdmin admin, - final TableName tableName, + public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName, final SplitTableRegionResponse response) { super(admin, tableName, (response != null && response.hasProcId()) ? response.getProcId() : null); } - public SplitTableRegionFuture( - final HBaseAdmin admin, - final TableName tableName, + public SplitTableRegionFuture(final HBaseAdmin admin, final TableName tableName, final Long procId) { super(admin, tableName, procId); } @@ -1942,8 +1901,8 @@ public void split(final TableName tableName, final byte[] splitPoint) throws IOE continue; } // if a split point given, only split that particular region - if (r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID || - (splitPoint != null && !r.containsRow(splitPoint))) { + if (r.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID + || (splitPoint != null && !r.containsRow(splitPoint))) { continue; } // call out to master to do split now @@ -1952,13 +1911,13 @@ public void split(final TableName tableName, final byte[] splitPoint) throws IOE } @Override - public void splitRegion(final byte[] regionName, final byte [] splitPoint) throws IOException { + public void splitRegion(final byte[] regionName, final byte[] splitPoint) throws IOException { Pair regionServerPair = getRegion(regionName); if (regionServerPair == null) { throw new IllegalArgumentException("Invalid region: " + Bytes.toStringBinary(regionName)); } - if (regionServerPair.getFirst() != null && - regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { + if (regionServerPair.getFirst() != null + && regionServerPair.getFirst().getReplicaId() != HRegionInfo.DEFAULT_REPLICA_ID) { throw new IllegalArgumentException("Can't split replicas directly. " + "Replicas are auto-split when their primary is split."); } @@ -1997,11 +1956,9 @@ protected Void postOperationResult(final Void result, final long deadlineTs) /** * @param regionName Name of a region. - * @return a pair of HRegionInfo and ServerName if regionName is - * a verified region name (we call {@link - * MetaTableAccessor#getRegionLocation(Connection, byte[])} - * else null. - * Throw IllegalArgumentException if regionName is null. + * @return a pair of HRegionInfo and ServerName if regionName is a verified region + * name (we call {@link MetaTableAccessor#getRegionLocation(Connection, byte[])} else + * null. Throw IllegalArgumentException if regionName is null. * @throws IOException if a remote or network exception occurs */ Pair getRegion(final byte[] regionName) throws IOException { @@ -2052,19 +2009,16 @@ public boolean visit(Result data) throws IOException { } /** - * If the input is a region name, it is returned as is. If it's an - * encoded region name, the corresponding region is found from meta - * and its region name is returned. If we can't find any region in - * meta matching the input as either region name or encoded region - * name, the input is returned as is. We don't throw unknown - * region exception. + * If the input is a region name, it is returned as is. If it's an encoded region name, the + * corresponding region is found from meta and its region name is returned. 
If we can't find any + * region in meta matching the input as either region name or encoded region name, the input is + * returned as is. We don't throw unknown region exception. */ - private byte[] getRegionName( - final byte[] regionNameOrEncodedRegionName) throws IOException { - if (Bytes.equals(regionNameOrEncodedRegionName, - HRegionInfo.FIRST_META_REGIONINFO.getRegionName()) - || Bytes.equals(regionNameOrEncodedRegionName, - HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { + private byte[] getRegionName(final byte[] regionNameOrEncodedRegionName) throws IOException { + if (Bytes + .equals(regionNameOrEncodedRegionName, HRegionInfo.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionNameOrEncodedRegionName, + HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { return HRegionInfo.FIRST_META_REGIONINFO.getRegionName(); } byte[] tmp = regionNameOrEncodedRegionName; @@ -2082,8 +2036,7 @@ private byte[] getRegionName( * @throws IOException if a remote or network exception occurs. * @throws TableNotFoundException if table does not exist. */ - private TableName checkTableExists(final TableName tableName) - throws IOException { + private TableName checkTableExists(final TableName tableName) throws IOException { return executeCallable(new RpcRetryingCallable() { @Override protected TableName rpcCall(int callTimeout) throws Exception { @@ -2120,17 +2073,16 @@ protected Void rpcCall() throws Exception { } @Override - public synchronized void stopRegionServer(final String hostnamePort) - throws IOException { + public synchronized void stopRegionServer(final String hostnamePort) throws IOException { String hostname = Addressing.parseHostname(hostnamePort); int port = Addressing.parsePort(hostnamePort); final AdminService.BlockingInterface admin = - this.connection.getAdmin(ServerName.valueOf(hostname, port, 0)); + this.connection.getAdmin(ServerName.valueOf(hostname, port, 0)); // TODO: There is no timeout on this controller. Set one! HBaseRpcController controller = rpcControllerFactory.newController(); controller.setPriority(HConstants.HIGH_QOS); - StopServerRequest request = RequestConverter.buildStopServerRequest( - "Called by admin client " + this.connection.toString()); + StopServerRequest request = RequestConverter + .buildStopServerRequest("Called by admin client " + this.connection.toString()); try { admin.stopServer(controller, request); } catch (Exception e) { @@ -2140,27 +2092,27 @@ public synchronized void stopRegionServer(final String hostnamePort) @Override public boolean isMasterInMaintenanceMode() throws IOException { - return executeCallable(new MasterCallable(getConnection(), - this.rpcControllerFactory) { - @Override - protected IsInMaintenanceModeResponse rpcCall() throws Exception { - return master.isMasterInMaintenanceMode(getRpcController(), + return executeCallable( + new MasterCallable(getConnection(), this.rpcControllerFactory) { + @Override + protected IsInMaintenanceModeResponse rpcCall() throws Exception { + return master.isMasterInMaintenanceMode(getRpcController(), IsInMaintenanceModeRequest.newBuilder().build()); - } - }).getInMaintenanceMode(); + } + }).getInMaintenanceMode(); } @Override public ClusterMetrics getClusterMetrics(EnumSet

- * <p>Connection should be an unmanaged connection obtained via
- * {@link ConnectionFactory#createConnection(Configuration)}.</p>
- *
- * <p>NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at
- * the wrong time. Use with caution. For experts only. These methods are only for the
- * extreme case where the cluster has been damaged or has achieved an inconsistent state because
- * of some unforeseen circumstance or bug and requires manual intervention.
- *
- * <p>An instance of this class is lightweight and not-thread safe. A new instance should be created
- * by each thread. Pooling or caching of the instance is not recommended.</p>
- *
+ * <p>
+ * Connection should be an unmanaged connection obtained via
+ * {@link ConnectionFactory#createConnection(Configuration)}.
+ * <p>
+ * NOTE: The methods in here can do damage to a cluster if applied in the wrong sequence or at the
+ * wrong time. Use with caution. For experts only. These methods are only for the extreme case where
+ * the cluster has been damaged or has achieved an inconsistent state because of some unforeseen
+ * circumstance or bug and requires manual intervention.
+ * <p>
+ * An instance of this class is lightweight and not-thread safe. A new instance should be created by
+ * each thread. Pooling or caching of the instance is not recommended.
+ *
    * @see ConnectionFactory * @see ClusterConnection * @see Hbck @@ -103,9 +104,9 @@ public boolean isAborted() { @Override public TableState setTableStateInMeta(TableState state) throws IOException { try { - GetTableStateResponse response = hbck.setTableStateInMeta( - rpcControllerFactory.newController(), - RequestConverter.buildSetTableStateInMetaRequest(state)); + GetTableStateResponse response = + hbck.setTableStateInMeta(rpcControllerFactory.newController(), + RequestConverter.buildSetTableStateInMetaRequest(state)); return TableState.convert(state.getTableName(), response.getTableState()); } catch (ServiceException se) { LOG.debug("table={}, state={}", state.getTableName(), state.getState(), se); @@ -115,14 +116,14 @@ public TableState setTableStateInMeta(TableState state) throws IOException { @Override public Map setRegionStateInMeta( - Map nameOrEncodedName2State) throws IOException { + Map nameOrEncodedName2State) throws IOException { try { if (LOG.isDebugEnabled()) { nameOrEncodedName2State.forEach((k, v) -> LOG.debug("region={}, state={}", k, v)); } MasterProtos.SetRegionStateInMetaResponse response = - hbck.setRegionStateInMeta(rpcControllerFactory.newController(), - RequestConverter.buildSetRegionStateInMetaRequest(nameOrEncodedName2State)); + hbck.setRegionStateInMeta(rpcControllerFactory.newController(), + RequestConverter.buildSetRegionStateInMetaRequest(nameOrEncodedName2State)); Map result = new HashMap<>(); for (RegionSpecifierAndState nameAndState : response.getStatesList()) { result.put(nameAndState.getRegionSpecifier().getValue().toStringUtf8(), @@ -135,11 +136,10 @@ public Map setRegionStateInMeta( } @Override - public List assigns(List encodedRegionNames, boolean override) - throws IOException { + public List assigns(List encodedRegionNames, boolean override) throws IOException { try { AssignsResponse response = this.hbck.assigns(rpcControllerFactory.newController(), - RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toAssignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -152,7 +152,7 @@ public List unassigns(List encodedRegionNames, boolean override) throws IOException { try { UnassignsResponse response = this.hbck.unassigns(rpcControllerFactory.newController(), - RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); + RequestConverter.toUnassignRegionsRequest(encodedRegionNames, override)); return response.getPidList(); } catch (ServiceException se) { LOG.debug(toCommaDelimitedString(encodedRegionNames), se); @@ -166,29 +166,25 @@ private static String toCommaDelimitedString(List list) { @Override public List bypassProcedure(List pids, long waitTime, boolean override, - boolean recursive) - throws IOException { - BypassProcedureResponse response = ProtobufUtil.call( - new Callable() { - @Override - public BypassProcedureResponse call() throws Exception { - try { - return hbck.bypassProcedure(rpcControllerFactory.newController(), - BypassProcedureRequest.newBuilder().addAllProcId(pids). - setWaitTime(waitTime).setOverride(override).setRecursive(recursive).build()); - } catch (Throwable t) { - LOG.error(pids.stream().map(i -> i.toString()). 
- collect(Collectors.joining(", ")), t); - throw t; - } - } - }); + boolean recursive) throws IOException { + BypassProcedureResponse response = ProtobufUtil.call(new Callable() { + @Override + public BypassProcedureResponse call() throws Exception { + try { + return hbck.bypassProcedure(rpcControllerFactory.newController(), + BypassProcedureRequest.newBuilder().addAllProcId(pids).setWaitTime(waitTime) + .setOverride(override).setRecursive(recursive).build()); + } catch (Throwable t) { + LOG.error(pids.stream().map(i -> i.toString()).collect(Collectors.joining(", ")), t); + throw t; + } + } + }); return response.getBypassedList(); } @Override - public List scheduleServerCrashProcedures(List serverNames) - throws IOException { + public List scheduleServerCrashProcedures(List serverNames) throws IOException { try { ScheduleServerCrashProcedureResponse response = this.hbck.scheduleServerCrashProcedure(rpcControllerFactory.newController(), @@ -207,9 +203,8 @@ public List scheduleServerCrashProcedures(List serverNames) public List scheduleSCPsForUnknownServers() throws IOException { try { ScheduleSCPsForUnknownServersResponse response = - this.hbck.scheduleSCPsForUnknownServers( - rpcControllerFactory.newController(), - ScheduleSCPsForUnknownServersRequest.newBuilder().build()); + this.hbck.scheduleSCPsForUnknownServers(rpcControllerFactory.newController(), + ScheduleSCPsForUnknownServersRequest.newBuilder().build()); return response.getPidList(); } catch (ServiceException se) { LOG.debug("Failed to run ServerCrashProcedures for unknown servers", se); @@ -221,7 +216,7 @@ public List scheduleSCPsForUnknownServers() throws IOException { public boolean runHbckChore() throws IOException { try { RunHbckChoreResponse response = this.hbck.runHbckChore(rpcControllerFactory.newController(), - RunHbckChoreRequest.newBuilder().build()); + RunHbckChoreRequest.newBuilder().build()); return response.getRan(); } catch (ServiceException se) { LOG.debug("Failed to run HBCK chore", se); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java index 11c9cc11b246..503b5f66147e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HRegionLocator.java @@ -1,24 +1,24 @@ -/** -* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.trace.HBaseSemanticAttributes.REGION_NAMES_KEY; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.context.Scope; @@ -39,6 +39,7 @@ import org.apache.hadoop.hbase.client.trace.TableSpanBuilder; import org.apache.hadoop.hbase.trace.TraceUtil; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; /** @@ -71,25 +72,20 @@ public void close() throws IOException { @Override public HRegionLocation getRegionLocation(byte[] row, int replicaId, boolean reload) - throws IOException { + throws IOException { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.getRegionLocation") - .setTableName(tableName); - return tracedLocationFuture( - () -> connection.locateRegion(tableName, row, !reload, true, replicaId) - .getRegionLocation(replicaId), - AsyncRegionLocator::getRegionNames, - supplier); + .setName("HRegionLocator.getRegionLocation").setTableName(tableName); + return tracedLocationFuture(() -> connection + .locateRegion(tableName, row, !reload, true, replicaId).getRegionLocation(replicaId), + AsyncRegionLocator::getRegionNames, supplier); } @Override public List getRegionLocations(byte[] row, boolean reload) throws IOException { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.getRegionLocations") - .setTableName(tableName); + .setName("HRegionLocator.getRegionLocations").setTableName(tableName); final RegionLocations locs = tracedLocationFuture( - () -> connection.locateRegion(tableName, row, !reload, true, - RegionInfo.DEFAULT_REPLICA_ID), + () -> connection.locateRegion(tableName, row, !reload, true, RegionInfo.DEFAULT_REPLICA_ID), AsyncRegionLocator::getRegionNames, supplier); return Arrays.asList(locs.getRegionLocations()); } @@ -97,8 +93,7 @@ public List getRegionLocations(byte[] row, boolean reload) thro @Override public List getAllRegionLocations() throws IOException { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.getAllRegionLocations") - .setTableName(tableName); + .setName("HRegionLocator.getAllRegionLocations").setTableName(tableName); return tracedLocationFuture(() -> { ArrayList regions = new ArrayList<>(); for (RegionLocations locations : listRegionLocations()) { @@ -115,19 +110,14 @@ private static List getRegionNames(List locations) { if (CollectionUtils.isEmpty(locations)) { return Collections.emptyList(); } - return locations.stream() - .filter(Objects::nonNull) - .map(AsyncRegionLocator::getRegionNames) - .filter(Objects::nonNull) - .flatMap(List::stream) - .collect(Collectors.toList()); + return locations.stream().filter(Objects::nonNull).map(AsyncRegionLocator::getRegionNames) + .filter(Objects::nonNull).flatMap(List::stream).collect(Collectors.toList()); } @Override 
public void clearRegionLocationCache() { final Supplier supplier = new TableSpanBuilder(connection) - .setName("HRegionLocator.clearRegionLocationCache") - .setTableName(tableName); + .setName("HRegionLocator.clearRegionLocationCache").setTableName(tableName); TraceUtil.trace(() -> connection.clearRegionCache(tableName), supplier); } @@ -138,8 +128,8 @@ public TableName getName() { private List listRegionLocations() throws IOException { if (TableName.isMetaTableName(tableName)) { - return Collections - .singletonList(connection.locateRegion(tableName, HConstants.EMPTY_START_ROW, false, true)); + return Collections.singletonList( + connection.locateRegion(tableName, HConstants.EMPTY_START_ROW, false, true)); } final List regions = new ArrayList<>(); MetaTableAccessor.Visitor visitor = new MetaTableAccessor.TableVisitorBase(tableName) { @@ -157,11 +147,8 @@ public boolean visitInternal(Result result) throws IOException { return regions; } - private R tracedLocationFuture( - TraceUtil.ThrowingCallable action, - Function> getRegionNames, - Supplier spanSupplier - ) throws T { + private R tracedLocationFuture(TraceUtil.ThrowingCallable action, + Function> getRegionNames, Supplier spanSupplier) throws T { final Span span = spanSupplier.get(); try (Scope ignored = span.makeCurrent()) { final R result = action.call(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java index f6221fd22f16..9f8d47d7f137 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTable.java @@ -19,14 +19,14 @@ // DO NOT MAKE USE OF THESE IMPORTS! THEY ARE HERE FOR COPROCESSOR ENDPOINTS ONLY. // Internally, we use shaded protobuf. This below are part of our public API. -//SEE ABOVE NOTE! +// SEE ABOVE NOTE! import static org.apache.hadoop.hbase.client.ConnectionUtils.checkHasFamilies; + import com.google.protobuf.Descriptors; import com.google.protobuf.Message; import com.google.protobuf.Service; import com.google.protobuf.ServiceException; - import io.opentelemetry.api.trace.Span; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; @@ -75,6 +75,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; @@ -84,23 +85,20 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse; /** - * An implementation of {@link Table}. Used to communicate with a single HBase table. - * Lightweight. Get as needed and just close when done. - * Instances of this class SHOULD NOT be constructed directly. - * Obtain an instance via {@link Connection}. See {@link ConnectionFactory} - * class comment for an example of how. - * - *

    <p>This class is thread safe since 2.0.0 if not invoking any of the setter methods. - * All setters are moved into {@link TableBuilder} and reserved here only for keeping - * backward compatibility, and TODO will be removed soon. - * - * <p>

    HTable is no longer a client API. Use {@link Table} instead. It is marked - * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in - * Hadoop - * Interface Classification - * There are no guarantees for backwards source / binary compatibility and methods or class can - * change or go away without deprecation. - * + * An implementation of {@link Table}. Used to communicate with a single HBase table. Lightweight. + * Get as needed and just close when done. Instances of this class SHOULD NOT be constructed + * directly. Obtain an instance via {@link Connection}. See {@link ConnectionFactory} class comment + * for an example of how. + * <p>

    + * This class is thread safe since 2.0.0 if not invoking any of the setter methods. All setters are + * moved into {@link TableBuilder} and reserved here only for keeping backward compatibility, and + * TODO will be removed soon. + * <p>

    + * HTable is no longer a client API. Use {@link Table} instead. It is marked + * InterfaceAudience.Private indicating that this is an HBase-internal class as defined in Hadoop + * Interface Classification There are no guarantees for backwards source / binary compatibility + * and methods or class can change or go away without deprecation. * @see Table * @see Admin * @see Connection @@ -118,7 +116,7 @@ public class HTable implements Table { private boolean closed = false; private final int scannerCaching; private final long scannerMaxResultSize; - private final ExecutorService pool; // For Multi & Scan + private final ExecutorService pool; // For Multi & Scan private int operationTimeoutMs; // global timeout for each blocking method with retrying rpc private final int rpcTimeoutMs; // FIXME we should use this for rpc like batch and checkAndXXX private int readRpcTimeoutMs; // timeout for each read rpc request @@ -146,17 +144,17 @@ public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) { // we only create as many Runnables as there are region servers. It means // it also scales when new region servers are added. ThreadPoolExecutor pool = - new ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime, TimeUnit.SECONDS, - new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("htable-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + new ThreadPoolExecutor(corePoolSize, maxThreads, keepAliveTime, TimeUnit.SECONDS, + new SynchronousQueue<>(), new ThreadFactoryBuilder().setNameFormat("htable-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); pool.allowCoreThreadTimeOut(true); return pool; } /** - * Creates an object to access a HBase table. - * Used by HBase internally. DO NOT USE. See {@link ConnectionFactory} class comment for how to - * get a {@link Table} instance (use {@link Table} instead of {@link HTable}). + * Creates an object to access a HBase table. Used by HBase internally. DO NOT USE. See + * {@link ConnectionFactory} class comment for how to get a {@link Table} instance (use + * {@link Table} instead of {@link HTable}). * @param connection Connection to be used. * @param builder The table builder * @param rpcCallerFactory The RPC caller factory @@ -164,11 +162,9 @@ public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) { * @param pool ExecutorService to be used. */ @InterfaceAudience.Private - protected HTable(final ConnectionImplementation connection, - final TableBuilderBase builder, + protected HTable(final ConnectionImplementation connection, final TableBuilderBase builder, final RpcRetryingCallerFactory rpcCallerFactory, - final RpcControllerFactory rpcControllerFactory, - final ExecutorService pool) { + final RpcControllerFactory rpcControllerFactory, final ExecutorService pool) { this.connection = Preconditions.checkNotNull(connection, "connection is null"); this.configuration = connection.getConfiguration(); this.connConfiguration = connection.getConnectionConfiguration(); @@ -222,8 +218,7 @@ public TableName getName() { } /** - * INTERNAL Used by unit tests and tools to do low-level - * manipulations. + * INTERNAL Used by unit tests and tools to do low-level manipulations. * @return A Connection instance. */ protected Connection getConnection() { @@ -248,42 +243,37 @@ public TableDescriptor getDescriptor() throws IOException { } /** - * Get the corresponding start keys and regions for an arbitrary range of - * keys. 
+ * Get the corresponding start keys and regions for an arbitrary range of keys. * <p>

    * @param startKey Starting row in range, inclusive * @param endKey Ending row in range * @param includeEndKey true if endRow is inclusive, false if exclusive - * @return A pair of list of start keys and list of HRegionLocations that - * contain the specified range + * @return A pair of list of start keys and list of HRegionLocations that contain the specified + * range * @throws IOException if a remote or network exception occurs */ - private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange( - final byte[] startKey, final byte[] endKey, final boolean includeEndKey) - throws IOException { + private Pair<List<byte[]>, List<HRegionLocation>> getKeysAndRegionsInRange(final byte[] startKey, + final byte[] endKey, final boolean includeEndKey) throws IOException { return getKeysAndRegionsInRange(startKey, endKey, includeEndKey, false); } /** - * Get the corresponding start keys and regions for an arbitrary range of - * keys. + * Get the corresponding start keys and regions for an arbitrary range of keys. * <p>

    * @param startKey Starting row in range, inclusive * @param endKey Ending row in range * @param includeEndKey true if endRow is inclusive, false if exclusive * @param reload true to reload information or false to use cached information - * @return A pair of list of start keys and list of HRegionLocations that - * contain the specified range + * @return A pair of list of start keys and list of HRegionLocations that contain the specified + * range * @throws IOException if a remote or network exception occurs */ - private Pair, List> getKeysAndRegionsInRange( - final byte[] startKey, final byte[] endKey, final boolean includeEndKey, - final boolean reload) throws IOException { - final boolean endKeyIsEndOfTable = Bytes.equals(endKey,HConstants.EMPTY_END_ROW); + private Pair, List> getKeysAndRegionsInRange(final byte[] startKey, + final byte[] endKey, final boolean includeEndKey, final boolean reload) throws IOException { + final boolean endKeyIsEndOfTable = Bytes.equals(endKey, HConstants.EMPTY_END_ROW); if ((Bytes.compareTo(startKey, endKey) > 0) && !endKeyIsEndOfTable) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(startKey) + - " > " + Bytes.toStringBinary(endKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(startKey) + " > " + + Bytes.toStringBinary(endKey)); } List keysInRange = new ArrayList<>(); List regionsInRange = new ArrayList<>(); @@ -300,8 +290,8 @@ private Pair, List> getKeysAndRegionsInRange( } /** - * The underlying {@link HTable} must not be closed. - * {@link Table#getScanner(Scan)} has other usage details. + * The underlying {@link HTable} must not be closed. {@link Table#getScanner(Scan)} has other + * usage details. */ @Override public ResultScanner getScanner(Scan scan) throws IOException { @@ -321,40 +311,39 @@ public ResultScanner getScanner(Scan scan) throws IOException { } if (scan.isReversed()) { - return new ReversedClientScanner(getConfiguration(), scan, getName(), - this.connection, this.rpcCallerFactory, this.rpcControllerFactory, - pool, connConfiguration.getReplicaCallTimeoutMicroSecondScan()); + return new ReversedClientScanner(getConfiguration(), scan, getName(), this.connection, + this.rpcCallerFactory, this.rpcControllerFactory, pool, + connConfiguration.getReplicaCallTimeoutMicroSecondScan()); } else { if (async) { return new ClientAsyncPrefetchScanner(getConfiguration(), scan, getName(), this.connection, - this.rpcCallerFactory, this.rpcControllerFactory, - pool, connConfiguration.getReplicaCallTimeoutMicroSecondScan()); + this.rpcCallerFactory, this.rpcControllerFactory, pool, + connConfiguration.getReplicaCallTimeoutMicroSecondScan()); } else { return new ClientSimpleScanner(getConfiguration(), scan, getName(), this.connection, - this.rpcCallerFactory, this.rpcControllerFactory, - pool, connConfiguration.getReplicaCallTimeoutMicroSecondScan()); + this.rpcCallerFactory, this.rpcControllerFactory, pool, + connConfiguration.getReplicaCallTimeoutMicroSecondScan()); } } } /** - * The underlying {@link HTable} must not be closed. - * {@link Table#getScanner(byte[])} has other usage details. + * The underlying {@link HTable} must not be closed. {@link Table#getScanner(byte[])} has other + * usage details. 
*/ @Override - public ResultScanner getScanner(byte [] family) throws IOException { + public ResultScanner getScanner(byte[] family) throws IOException { Scan scan = new Scan(); scan.addFamily(family); return getScanner(scan); } /** - * The underlying {@link HTable} must not be closed. - * {@link Table#getScanner(byte[], byte[])} has other usage details. + * The underlying {@link HTable} must not be closed. {@link Table#getScanner(byte[], byte[])} has + * other usage details. */ @Override - public ResultScanner getScanner(byte [] family, byte [] qualifier) - throws IOException { + public ResultScanner getScanner(byte[] family, byte[] qualifier) throws IOException { Scan scan = new Scan(); scan.addColumn(family, qualifier); return getScanner(scan); @@ -362,9 +351,8 @@ public ResultScanner getScanner(byte [] family, byte [] qualifier) @Override public Result get(final Get get) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(get); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get); return TraceUtil.trace(() -> get(get, get.isCheckExistenceOnly()), supplier); } @@ -373,42 +361,41 @@ private Result get(Get get, final boolean checkExistenceOnly) throws IOException if (get.isCheckExistenceOnly() != checkExistenceOnly || get.getConsistency() == null) { get = ReflectionUtils.newInstance(get.getClass(), get); get.setCheckExistenceOnly(checkExistenceOnly); - if (get.getConsistency() == null){ + if (get.getConsistency() == null) { get.setConsistency(DEFAULT_CONSISTENCY); } } if (get.getConsistency() == Consistency.STRONG) { final Get configuredGet = get; - ClientServiceCallable callable = new ClientServiceCallable(this.connection, getName(), - get.getRow(), this.rpcControllerFactory.newController(), get.getPriority()) { + ClientServiceCallable callable = new ClientServiceCallable(this.connection, + getName(), get.getRow(), this.rpcControllerFactory.newController(), get.getPriority()) { @Override protected Result rpcCall() throws Exception { - ClientProtos.GetRequest request = RequestConverter.buildGetRequest( - getLocation().getRegionInfo().getRegionName(), configuredGet); + ClientProtos.GetRequest request = RequestConverter + .buildGetRequest(getLocation().getRegionInfo().getRegionName(), configuredGet); ClientProtos.GetResponse response = doGet(request); - return response == null? null: - ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + return response == null ? null + : ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); } }; - return rpcCallerFactory.newCaller(readRpcTimeoutMs).callWithRetries(callable, - this.operationTimeoutMs); + return rpcCallerFactory. 
newCaller(readRpcTimeoutMs).callWithRetries(callable, + this.operationTimeoutMs); } // Call that takes into account the replica - RpcRetryingCallerWithReadReplicas callable = new RpcRetryingCallerWithReadReplicas( - rpcControllerFactory, tableName, this.connection, get, pool, - connConfiguration.getRetriesNumber(), operationTimeoutMs, readRpcTimeoutMs, - connConfiguration.getPrimaryCallTimeoutMicroSecond()); + RpcRetryingCallerWithReadReplicas callable = + new RpcRetryingCallerWithReadReplicas(rpcControllerFactory, tableName, this.connection, get, + pool, connConfiguration.getRetriesNumber(), operationTimeoutMs, readRpcTimeoutMs, + connConfiguration.getPrimaryCallTimeoutMicroSecond()); return callable.call(operationTimeoutMs); } @Override public Result[] get(List gets) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(gets); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets); return TraceUtil.trace(() -> { if (gets.size() == 1) { return new Result[] { get(gets.get(0)) }; @@ -458,20 +445,13 @@ public void batch(final List actions, final Object[] results) public void batch(final List actions, final Object[] results, int rpcTimeout) throws InterruptedException, IOException { - AsyncProcessTask task = AsyncProcessTask.newBuilder() - .setPool(pool) - .setTableName(tableName) - .setRowAccess(actions) - .setResults(results) - .setRpcTimeout(rpcTimeout) - .setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .build(); - final Span span = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(actions) - .build(); + AsyncProcessTask task = + AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName).setRowAccess(actions) + .setResults(results).setRpcTimeout(rpcTimeout).setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions) + .build(); try (Scope ignored = span.makeCurrent()) { AsyncRequestFuture ars = multiAp.submit(task); ars.waitUntilDone(); @@ -486,33 +466,25 @@ public void batch(final List actions, final Object[] results, int } @Override - public void batchCallback( - final List actions, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException { + public void batchCallback(final List actions, final Object[] results, + final Batch.Callback callback) throws IOException, InterruptedException { doBatchWithCallback(actions, results, callback, connection, pool, tableName); } public static void doBatchWithCallback(List actions, Object[] results, - Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) - throws InterruptedIOException, RetriesExhaustedWithDetailsException { + Callback callback, ClusterConnection connection, ExecutorService pool, TableName tableName) + throws InterruptedIOException, RetriesExhaustedWithDetailsException { int operationTimeout = connection.getConnectionConfiguration().getOperationTimeout(); int writeTimeout = 
connection.getConfiguration().getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); - AsyncProcessTask task = AsyncProcessTask.newBuilder(callback) - .setPool(pool) - .setTableName(tableName) - .setRowAccess(actions) - .setResults(results) - .setOperationTimeout(operationTimeout) - .setRpcTimeout(writeTimeout) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .build(); - final Span span = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(actions) - .build(); + connection.getConfiguration().getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + AsyncProcessTask task = AsyncProcessTask.newBuilder(callback).setPool(pool) + .setTableName(tableName).setRowAccess(actions).setResults(results) + .setOperationTimeout(operationTimeout).setRpcTimeout(writeTimeout) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); + final Span span = new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(actions) + .build(); try (Scope ignored = span.makeCurrent()) { AsyncRequestFuture ars = connection.getAsyncProcess().submit(task); ars.waitUntilDone(); @@ -527,23 +499,22 @@ public static void doBatchWithCallback(List actions, Object[] @Override public void delete(final Delete delete) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(delete); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(delete); TraceUtil.trace(() -> { ClientServiceCallable callable = - new ClientServiceCallable(this.connection, getName(), delete.getRow(), - this.rpcControllerFactory.newController(), delete.getPriority()) { - @Override - protected Void rpcCall() throws Exception { - MutateRequest request = RequestConverter - .buildMutateRequest(getLocation().getRegionInfo().getRegionName(), delete); - doMutate(request); - return null; - } - }; - rpcCallerFactory.newCaller(this.writeRpcTimeoutMs) - .callWithRetries(callable, this.operationTimeoutMs); + new ClientServiceCallable(this.connection, getName(), delete.getRow(), + this.rpcControllerFactory.newController(), delete.getPriority()) { + @Override + protected Void rpcCall() throws Exception { + MutateRequest request = RequestConverter + .buildMutateRequest(getLocation().getRegionInfo().getRegionName(), delete); + doMutate(request); + return null; + } + }; + rpcCallerFactory. 
newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, + this.operationTimeoutMs); }, supplier); } @@ -570,24 +541,22 @@ public void delete(final List deletes) throws IOException { @Override public void put(final Put put) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(put); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(put); TraceUtil.trace(() -> { validatePut(put); - ClientServiceCallable callable = - new ClientServiceCallable(this.connection, getName(), put.getRow(), - this.rpcControllerFactory.newController(), put.getPriority()) { - @Override - protected Void rpcCall() throws Exception { - MutateRequest request = RequestConverter + ClientServiceCallable callable = new ClientServiceCallable(this.connection, + getName(), put.getRow(), this.rpcControllerFactory.newController(), put.getPriority()) { + @Override + protected Void rpcCall() throws Exception { + MutateRequest request = RequestConverter .buildMutateRequest(getLocation().getRegionInfo().getRegionName(), put); - doMutate(request); - return null; - } - }; - rpcCallerFactory.newCaller(this.writeRpcTimeoutMs) - .callWithRetries(callable, this.operationTimeoutMs); + doMutate(request); + return null; + } + }; + rpcCallerFactory. newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, + this.operationTimeoutMs); }, supplier); } @@ -606,44 +575,38 @@ public void put(final List puts) throws IOException { @Override public Result mutateRow(final RowMutations rm) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(rm); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(rm); return TraceUtil.trace(() -> { long nonceGroup = getNonceGroup(); long nonce = getNonce(); CancellableRegionServerCallable callable = - new CancellableRegionServerCallable(this.connection, getName(), rm.getRow(), - rpcControllerFactory.newController(), writeRpcTimeoutMs, - new RetryingTimeTracker().start(), rm.getMaxPriority()) { - @Override - protected MultiResponse rpcCall() throws Exception { - MultiRequest request = RequestConverter.buildMultiRequest( - getLocation().getRegionInfo().getRegionName(), rm, nonceGroup, nonce); - ClientProtos.MultiResponse response = doMulti(request); - ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); - if (res.hasException()) { - Throwable ex = ProtobufUtil.toException(res.getException()); - if (ex instanceof IOException) { - throw (IOException) ex; + new CancellableRegionServerCallable(this.connection, getName(), + rm.getRow(), rpcControllerFactory.newController(), writeRpcTimeoutMs, + new RetryingTimeTracker().start(), rm.getMaxPriority()) { + @Override + protected MultiResponse rpcCall() throws Exception { + MultiRequest request = RequestConverter.buildMultiRequest( + getLocation().getRegionInfo().getRegionName(), rm, nonceGroup, nonce); + ClientProtos.MultiResponse response = doMulti(request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if (ex instanceof IOException) { + throw (IOException) ex; + } + throw new IOException("Failed to mutate row: 
" + Bytes.toStringBinary(rm.getRow()), + ex); + } + return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); } - throw new IOException("Failed to mutate row: " + Bytes.toStringBinary(rm.getRow()), ex); - } - return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); - } - }; + }; Object[] results = new Object[rm.getMutations().size()]; - AsyncProcessTask task = AsyncProcessTask.newBuilder() - .setPool(pool) - .setTableName(tableName) - .setRowAccess(rm.getMutations()) - .setCallable(callable) - .setRpcTimeout(writeRpcTimeoutMs) - .setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .setResults(results) - .build(); + AsyncProcessTask task = AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName) + .setRowAccess(rm.getMutations()).setCallable(callable).setRpcTimeout(writeRpcTimeoutMs) + .setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).setResults(results).build(); AsyncRequestFuture ars = multiAp.submit(task); ars.waitUntilDone(); if (ars.hasError()) { @@ -663,70 +626,65 @@ private long getNonce() { @Override public Result append(final Append append) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(append); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(append); return TraceUtil.trace(() -> { checkHasFamilies(append); NoncedRegionServerCallable callable = new NoncedRegionServerCallable(this.connection, getName(), append.getRow(), this.rpcControllerFactory.newController(), append.getPriority()) { - @Override - protected Result rpcCall() throws Exception { - MutateRequest request = RequestConverter.buildMutateRequest( - getLocation().getRegionInfo().getRegionName(), append, super.getNonceGroup(), - super.getNonce()); - MutateResponse response = doMutate(request); - if (!response.hasResult()) { - return null; - } - return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); - } - }; - return rpcCallerFactory. newCaller(this.writeRpcTimeoutMs). - callWithRetries(callable, this.operationTimeoutMs); + @Override + protected Result rpcCall() throws Exception { + MutateRequest request = + RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), + append, super.getNonceGroup(), super.getNonce()); + MutateResponse response = doMutate(request); + if (!response.hasResult()) { + return null; + } + return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + } + }; + return rpcCallerFactory. 
newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, + this.operationTimeoutMs); }, supplier); } @Override public Result increment(final Increment increment) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(increment); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(increment); return TraceUtil.trace(() -> { checkHasFamilies(increment); NoncedRegionServerCallable callable = new NoncedRegionServerCallable(this.connection, getName(), increment.getRow(), this.rpcControllerFactory.newController(), increment.getPriority()) { - @Override - protected Result rpcCall() throws Exception { - MutateRequest request = RequestConverter.buildMutateRequest( - getLocation().getRegionInfo().getRegionName(), increment, super.getNonceGroup(), - super.getNonce()); - MutateResponse response = doMutate(request); - // Should this check for null like append does? - return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); - } - }; + @Override + protected Result rpcCall() throws Exception { + MutateRequest request = + RequestConverter.buildMutateRequest(getLocation().getRegionInfo().getRegionName(), + increment, super.getNonceGroup(), super.getNonce()); + MutateResponse response = doMutate(request); + // Should this check for null like append does? + return ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + } + }; return rpcCallerFactory. newCaller(writeRpcTimeoutMs).callWithRetries(callable, - this.operationTimeoutMs); + this.operationTimeoutMs); }, supplier); } @Override - public long incrementColumnValue(final byte [] row, final byte [] family, - final byte [] qualifier, final long amount) - throws IOException { + public long incrementColumnValue(final byte[] row, final byte[] family, final byte[] qualifier, + final long amount) throws IOException { return incrementColumnValue(row, family, qualifier, amount, Durability.SYNC_WAL); } @Override - public long incrementColumnValue(final byte [] row, final byte [] family, - final byte [] qualifier, final long amount, final Durability durability) - throws IOException { + public long incrementColumnValue(final byte[] row, final byte[] family, final byte[] qualifier, + final long amount, final Durability durability) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.INCREMENT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.INCREMENT); return TraceUtil.trace(() -> { NullPointerException npe = null; if (row == null) { @@ -735,65 +693,62 @@ public long incrementColumnValue(final byte [] row, final byte [] family, npe = new NullPointerException("family is null"); } if (npe != null) { - throw new IOException( - "Invalid arguments to incrementColumnValue", npe); + throw new IOException("Invalid arguments to incrementColumnValue", npe); } NoncedRegionServerCallable callable = new NoncedRegionServerCallable(this.connection, getName(), row, this.rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Long rpcCall() throws Exception { - MutateRequest request = RequestConverter.buildIncrementRequest( - getLocation().getRegionInfo().getRegionName(), row, family, - qualifier, amount, durability, super.getNonceGroup(), super.getNonce()); - MutateResponse response = doMutate(request); - Result result = - 
ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); - return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); - } - }; - return rpcCallerFactory. newCaller(this.writeRpcTimeoutMs). - callWithRetries(callable, this.operationTimeoutMs); + @Override + protected Long rpcCall() throws Exception { + MutateRequest request = RequestConverter.buildIncrementRequest( + getLocation().getRegionInfo().getRegionName(), row, family, qualifier, amount, + durability, super.getNonceGroup(), super.getNonce()); + MutateResponse response = doMutate(request); + Result result = + ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner()); + return Long.valueOf(Bytes.toLong(result.getValue(family, qualifier))); + } + }; + return rpcCallerFactory. newCaller(this.writeRpcTimeoutMs).callWithRetries(callable, + this.operationTimeoutMs); }, supplier); } @Override @Deprecated - public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier, - final byte [] value, final Put put) throws IOException { + public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier, + final byte[] value, final Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.PUT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.PUT); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, value, null, null, put) - .isSuccess(), + .isSuccess(), supplier); } @Override @Deprecated - public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOp compareOp, final byte [] value, final Put put) throws IOException { + public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier, + final CompareOp compareOp, final byte[] value, final Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.PUT); - return TraceUtil.trace( - () -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null, - null, put).isSuccess(), - supplier); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.PUT); + return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, + toCompareOperator(compareOp), value, null, null, put).isSuccess(), supplier); } @Override @Deprecated - public boolean checkAndPut(final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, final byte [] value, final Put put) throws IOException { + public boolean checkAndPut(final byte[] row, final byte[] family, final byte[] qualifier, + final CompareOperator op, final byte[] value, final Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - 
.setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.PUT); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.PUT); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, put).isSuccess(), supplier); @@ -802,39 +757,35 @@ public boolean checkAndPut(final byte [] row, final byte [] family, final byte [ @Override @Deprecated public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, - final byte[] value, final Delete delete) throws IOException { + final byte[] value, final Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.DELETE); - return TraceUtil.trace( - () -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, value, null, null, - delete).isSuccess(), - supplier); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.DELETE); + return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, CompareOperator.EQUAL, + value, null, null, delete).isSuccess(), supplier); } @Override @Deprecated public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOp compareOp, final byte[] value, final Delete delete) throws IOException { + final CompareOp compareOp, final byte[] value, final Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.DELETE); - return TraceUtil.trace( - () -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null, - null, delete).isSuccess(), - supplier); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + HBaseSemanticAttributes.Operation.DELETE); + return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, + toCompareOperator(compareOp), value, null, null, delete).isSuccess(), supplier); } @Override @Deprecated public boolean checkAndDelete(final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Delete delete) throws IOException { + final CompareOperator op, final byte[] value, final Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, HBaseSemanticAttributes.Operation.DELETE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE, + 
HBaseSemanticAttributes.Operation.DELETE); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, delete).isSuccess(), supplier); @@ -853,49 +804,44 @@ public CheckAndMutateWithFilterBuilder checkAndMutate(byte[] row, Filter filter) } private CheckAndMutateResult doCheckAndMutate(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, - final TimeRange timeRange, final RowMutations rm) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, + final TimeRange timeRange, final RowMutations rm) throws IOException { long nonceGroup = getNonceGroup(); long nonce = getNonce(); CancellableRegionServerCallable callable = - new CancellableRegionServerCallable(connection, getName(), rm.getRow(), - rpcControllerFactory.newController(), writeRpcTimeoutMs, new RetryingTimeTracker().start(), - rm.getMaxPriority()) { - @Override - protected MultiResponse rpcCall() throws Exception { - MultiRequest request = RequestConverter - .buildMultiRequest(getLocation().getRegionInfo().getRegionName(), row, family, - qualifier, op, value, filter, timeRange, rm, nonceGroup, nonce); - ClientProtos.MultiResponse response = doMulti(request); - ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); - if (res.hasException()) { - Throwable ex = ProtobufUtil.toException(res.getException()); - if (ex instanceof IOException) { - throw (IOException) ex; + new CancellableRegionServerCallable(connection, getName(), rm.getRow(), + rpcControllerFactory.newController(), writeRpcTimeoutMs, + new RetryingTimeTracker().start(), rm.getMaxPriority()) { + @Override + protected MultiResponse rpcCall() throws Exception { + MultiRequest request = + RequestConverter.buildMultiRequest(getLocation().getRegionInfo().getRegionName(), + row, family, qualifier, op, value, filter, timeRange, rm, nonceGroup, nonce); + ClientProtos.MultiResponse response = doMulti(request); + ClientProtos.RegionActionResult res = response.getRegionActionResultList().get(0); + if (res.hasException()) { + Throwable ex = ProtobufUtil.toException(res.getException()); + if (ex instanceof IOException) { + throw (IOException) ex; + } + throw new IOException( + "Failed to checkAndMutate row: " + Bytes.toStringBinary(rm.getRow()), ex); + } + return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); } - throw new IOException( - "Failed to checkAndMutate row: " + Bytes.toStringBinary(rm.getRow()), ex); - } - return ResponseConverter.getResults(request, response, getRpcControllerCellScanner()); - } - }; + }; /** - * Currently, we use one array to store 'processed' flag which is returned by server. - * It is excessive to send such a large array, but that is required by the framework right now - * */ + * Currently, we use one array to store 'processed' flag which is returned by server. It is + * excessive to send such a large array, but that is required by the framework right now + */ Object[] results = new Object[rm.getMutations().size()]; - AsyncProcessTask task = AsyncProcessTask.newBuilder() - .setPool(pool) - .setTableName(tableName) - .setRowAccess(rm.getMutations()) - .setResults(results) - .setCallable(callable) - // TODO any better timeout? 
- .setRpcTimeout(Math.max(readRpcTimeoutMs, writeRpcTimeoutMs)) - .setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .build(); + AsyncProcessTask task = AsyncProcessTask.newBuilder().setPool(pool).setTableName(tableName) + .setRowAccess(rm.getMutations()).setResults(results).setCallable(callable) + // TODO any better timeout? + .setRpcTimeout(Math.max(readRpcTimeoutMs, writeRpcTimeoutMs)) + .setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); AsyncRequestFuture ars = multiAp.submit(task); ars.waitUntilDone(); if (ars.hasError()) { @@ -907,26 +853,22 @@ protected MultiResponse rpcCall() throws Exception { @Override @Deprecated - public boolean checkAndMutate(final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOp compareOp, final byte [] value, final RowMutations rm) throws IOException { + public boolean checkAndMutate(final byte[] row, final byte[] family, final byte[] qualifier, + final CompareOp compareOp, final byte[] value, final RowMutations rm) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(rm); - return TraceUtil.trace( - () -> doCheckAndMutate(row, family, qualifier, toCompareOperator(compareOp), value, null, - null, rm).isSuccess(), - supplier); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(rm); + return TraceUtil.trace(() -> doCheckAndMutate(row, family, qualifier, + toCompareOperator(compareOp), value, null, null, rm).isSuccess(), supplier); } @Override @Deprecated - public boolean checkAndMutate(final byte [] row, final byte [] family, final byte [] qualifier, - final CompareOperator op, final byte [] value, final RowMutations rm) throws IOException { + public boolean checkAndMutate(final byte[] row, final byte[] family, final byte[] qualifier, + final CompareOperator op, final byte[] value, final RowMutations rm) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) - .setContainerOperations(rm); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE) + .setContainerOperations(rm); return TraceUtil.trace( () -> doCheckAndMutate(row, family, qualifier, op, value, null, null, rm).isSuccess(), supplier); @@ -934,14 +876,13 @@ public boolean checkAndMutate(final byte [] row, final byte [] family, final byt @Override public CheckAndMutateResult checkAndMutate(CheckAndMutate checkAndMutate) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(checkAndMutate) - .setContainerOperations(checkAndMutate); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(checkAndMutate).setContainerOperations(checkAndMutate); return TraceUtil.trace(() -> { Row action = checkAndMutate.getAction(); - if (action instanceof Put || action instanceof Delete || action instanceof Increment || - action instanceof Append) { + if (action instanceof Put || action instanceof Delete || action instanceof Increment + || action instanceof Append) { if (action instanceof Put) { validatePut((Put) action); } @@ -957,37 +898,36 @@ public CheckAndMutateResult 
checkAndMutate(CheckAndMutate checkAndMutate) throws } private CheckAndMutateResult doCheckAndMutate(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, - final TimeRange timeRange, final Mutation mutation) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, + final TimeRange timeRange, final Mutation mutation) throws IOException { long nonceGroup = getNonceGroup(); long nonce = getNonce(); ClientServiceCallable callable = - new ClientServiceCallable(this.connection, getName(), row, - this.rpcControllerFactory.newController(), mutation.getPriority()) { - @Override - protected CheckAndMutateResult rpcCall() throws Exception { - MutateRequest request = RequestConverter.buildMutateRequest( - getLocation().getRegionInfo().getRegionName(), row, family, qualifier, op, value, - filter, timeRange, mutation, nonceGroup, nonce); - MutateResponse response = doMutate(request); - if (response.hasResult()) { - return new CheckAndMutateResult(response.getProcessed(), - ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner())); + new ClientServiceCallable(this.connection, getName(), row, + this.rpcControllerFactory.newController(), mutation.getPriority()) { + @Override + protected CheckAndMutateResult rpcCall() throws Exception { + MutateRequest request = RequestConverter.buildMutateRequest( + getLocation().getRegionInfo().getRegionName(), row, family, qualifier, op, value, + filter, timeRange, mutation, nonceGroup, nonce); + MutateResponse response = doMutate(request); + if (response.hasResult()) { + return new CheckAndMutateResult(response.getProcessed(), + ProtobufUtil.toResult(response.getResult(), getRpcControllerCellScanner())); + } + return new CheckAndMutateResult(response.getProcessed(), null); } - return new CheckAndMutateResult(response.getProcessed(), null); - } - }; + }; return rpcCallerFactory. 
newCaller(this.writeRpcTimeoutMs) - .callWithRetries(callable, this.operationTimeoutMs); + .callWithRetries(callable, this.operationTimeoutMs); } @Override public List checkAndMutate(List checkAndMutates) - throws IOException { + throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(checkAndMutates); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.BATCH) + .setContainerOperations(checkAndMutates); return TraceUtil.trace(() -> { if (checkAndMutates.isEmpty()) { return Collections.emptyList(); @@ -1043,9 +983,8 @@ private CompareOperator toCompareOperator(CompareOp compareOp) { @Override public boolean exists(final Get get) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(get); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName).setOperation(get); return TraceUtil.trace(() -> { Result r = get(get, true); assert r.getExists() != null; @@ -1055,10 +994,9 @@ public boolean exists(final Get get) throws IOException { @Override public boolean[] exists(List gets) throws IOException { - final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.BATCH) - .setContainerOperations(gets); + final Supplier supplier = + new TableOperationSpanBuilder(connection).setTableName(tableName) + .setOperation(HBaseSemanticAttributes.Operation.BATCH).setContainerOperations(gets); return TraceUtil.trace(() -> { if (gets.isEmpty()) { return new boolean[] {}; @@ -1094,28 +1032,23 @@ public boolean[] exists(List gets) throws IOException { } /** - * Process a mixed batch of Get, Put and Delete actions. All actions for a - * RegionServer are forwarded in one RPC call. Queries are executed in parallel. - * + * Process a mixed batch of Get, Put and Delete actions. All actions for a RegionServer are + * forwarded in one RPC call. Queries are executed in parallel. * @param list The collection of actions. - * @param results An empty array, same size as list. If an exception is thrown, - * you can test here for partial results, and to determine which actions - * processed successfully. - * @throws IOException if there are problems talking to META. Per-item - * exceptions are stored in the results array. + * @param results An empty array, same size as list. If an exception is thrown, you can test here + * for partial results, and to determine which actions processed successfully. + * @throws IOException if there are problems talking to META. Per-item exceptions are stored in + * the results array. 
*/ - public void processBatchCallback( - final List list, final Object[] results, final Batch.Callback callback) - throws IOException, InterruptedException { + public void processBatchCallback(final List list, final Object[] results, + final Batch.Callback callback) throws IOException, InterruptedException { this.batchCallback(list, results, callback); } @Override public void close() throws IOException { - final Supplier supplier = new TableSpanBuilder(connection) - .setName("HTable.close") - .setTableName(tableName) - .setSpanKind(SpanKind.INTERNAL); + final Supplier supplier = new TableSpanBuilder(connection).setName("HTable.close") + .setTableName(tableName).setSpanKind(SpanKind.INTERNAL); TraceUtil.trace(() -> { if (this.closed) { return; @@ -1151,8 +1084,8 @@ ExecutorService getPool() { } /** - * Explicitly clears the region cache to fetch the latest value from META. - * This is a power user function: avoid unless you know the ramifications. + * Explicitly clears the region cache to fetch the latest value from META. This is a power user + * function: avoid unless you know the ramifications. */ public void clearRegionCache() { this.connection.clearRegionLocationCache(); @@ -1164,11 +1097,11 @@ public CoprocessorRpcChannel coprocessorService(byte[] row) { } @Override - public Map coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable) + public Map coprocessorService(final Class service, + byte[] startKey, byte[] endKey, final Batch.Call callable) throws ServiceException, Throwable { - final Map results = Collections.synchronizedMap( - new TreeMap<>(Bytes.BYTES_COMPARATOR)); + final Map results = + Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR)); coprocessorService(service, startKey, endKey, callable, (region, row, value) -> { if (region != null) { results.put(region, value); @@ -1178,24 +1111,23 @@ public Map coprocessorService(final Class se } @Override - public void coprocessorService(final Class service, - byte[] startKey, byte[] endKey, final Batch.Call callable, - final Batch.Callback callback) throws ServiceException, Throwable { + public void coprocessorService(final Class service, byte[] startKey, + byte[] endKey, final Batch.Call callable, final Batch.Callback callback) + throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { final Context context = Context.current(); final ExecutorService wrappedPool = context.wrap(pool); // get regions covered by the row range List keys = getStartKeysInRange(startKey, endKey); - Map> futures = new TreeMap<>(Bytes.BYTES_COMPARATOR); + Map> futures = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (final byte[] r : keys) { final RegionCoprocessorRpcChannel channel = - new RegionCoprocessorRpcChannel(connection, tableName, r); + new RegionCoprocessorRpcChannel(connection, tableName, r); Future future = wrappedPool.submit(() -> { T instance = - org.apache.hadoop.hbase.protobuf.ProtobufUtil.newServiceStub(service, channel); + org.apache.hadoop.hbase.protobuf.ProtobufUtil.newServiceStub(service, channel); R result = callable.call(instance); byte[] region = channel.getLastRegion(); if (callback != null) { @@ -1205,7 +1137,7 @@ public void coprocessorService(final Class service, }); futures.put(r, future); } - for (Map.Entry> e : 
futures.entrySet()) { + for (Map.Entry> e : futures.entrySet()) { try { e.getValue().get(); } catch (ExecutionException ee) { @@ -1214,14 +1146,13 @@ public void coprocessorService(final Class service, throw ee.getCause(); } catch (InterruptedException ie) { throw new InterruptedIOException("Interrupted calling coprocessor service " - + service.getName() + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie); + + service.getName() + " for row " + Bytes.toStringBinary(e.getKey())).initCause(ie); } } }, supplier); } - private List getStartKeysInRange(byte[] start, byte[] end) - throws IOException { + private List getStartKeysInRange(byte[] start, byte[] end) throws IOException { if (start == null) { start = HConstants.EMPTY_START_ROW; } @@ -1307,10 +1238,10 @@ public String toString() { @Override public Map batchCoprocessorService( - Descriptors.MethodDescriptor methodDescriptor, Message request, - byte[] startKey, byte[] endKey, R responsePrototype) throws ServiceException, Throwable { - final Map results = Collections.synchronizedMap(new TreeMap<>( - Bytes.BYTES_COMPARATOR)); + Descriptors.MethodDescriptor methodDescriptor, Message request, byte[] startKey, + byte[] endKey, R responsePrototype) throws ServiceException, Throwable { + final Map results = + Collections.synchronizedMap(new TreeMap<>(Bytes.BYTES_COMPARATOR)); batchCoprocessorService(methodDescriptor, request, startKey, endKey, responsePrototype, (region, row, result) -> { if (region != null) { @@ -1322,22 +1253,20 @@ public Map batchCoprocessorService( @Override public void batchCoprocessorService( - final Descriptors.MethodDescriptor methodDescriptor, final Message request, - byte[] startKey, byte[] endKey, final R responsePrototype, final Callback callback) + final Descriptors.MethodDescriptor methodDescriptor, final Message request, byte[] startKey, + byte[] endKey, final R responsePrototype, final Callback callback) throws ServiceException, Throwable { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.COPROC_EXEC); TraceUtil.trace(() -> { final Context context = Context.current(); - final byte[] sanitizedStartKey = Optional.ofNullable(startKey) - .orElse(HConstants.EMPTY_START_ROW); - final byte[] sanitizedEndKey = Optional.ofNullable(endKey) - .orElse(HConstants.EMPTY_END_ROW); + final byte[] sanitizedStartKey = + Optional.ofNullable(startKey).orElse(HConstants.EMPTY_START_ROW); + final byte[] sanitizedEndKey = Optional.ofNullable(endKey).orElse(HConstants.EMPTY_END_ROW); // get regions covered by the row range Pair, List> keysAndRegions = - getKeysAndRegionsInRange(sanitizedStartKey, sanitizedEndKey, true); + getKeysAndRegionsInRange(sanitizedStartKey, sanitizedEndKey, true); List keys = keysAndRegions.getFirst(); List regions = keysAndRegions.getSecond(); @@ -1350,12 +1279,12 @@ public void batchCoprocessorService( List execs = new ArrayList<>(keys.size()); final Map execsByRow = - new TreeMap<>(Bytes.BYTES_COMPARATOR); + new TreeMap<>(Bytes.BYTES_COMPARATOR); for (int i = 0; i < keys.size(); i++) { final byte[] rowKey = keys.get(i); final byte[] region = regions.get(i).getRegionInfo().getRegionName(); RegionCoprocessorServiceExec exec = - new RegionCoprocessorServiceExec(region, rowKey, methodDescriptor, request); + new RegionCoprocessorServiceExec(region, rowKey, methodDescriptor, request); execs.add(exec); execsByRow.put(rowKey, 
exec); } @@ -1368,47 +1297,42 @@ public void batchCoprocessorService( Object[] results = new Object[execs.size()]; AsyncProcess asyncProcess = new AsyncProcess(connection, configuration, - RpcRetryingCallerFactory.instantiate(configuration, connection.getStatisticsTracker()), - RpcControllerFactory.instantiate(configuration)); + RpcRetryingCallerFactory.instantiate(configuration, connection.getStatisticsTracker()), + RpcControllerFactory.instantiate(configuration)); Callback resultsCallback = - (byte[] region, byte[] row, ClientProtos.CoprocessorServiceResult serviceResult) -> { - if (LOG.isTraceEnabled()) { - LOG.trace("Received result for endpoint {}: region={}, row={}, value={}", - methodDescriptor.getFullName(), Bytes.toStringBinary(region), - Bytes.toStringBinary(row), serviceResult.getValue().getValue()); - } - try { - Message.Builder builder = responsePrototype.newBuilderForType(); - org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builder, - serviceResult.getValue().getValue().toByteArray()); - callback.update(region, row, (R) builder.build()); - } catch (IOException e) { - LOG.error("Unexpected response type from endpoint {}", methodDescriptor.getFullName(), - e); - callbackErrorExceptions.add(e); - callbackErrorActions.add(execsByRow.get(row)); - callbackErrorServers.add("null"); - } - }; + (byte[] region, byte[] row, ClientProtos.CoprocessorServiceResult serviceResult) -> { + if (LOG.isTraceEnabled()) { + LOG.trace("Received result for endpoint {}: region={}, row={}, value={}", + methodDescriptor.getFullName(), Bytes.toStringBinary(region), + Bytes.toStringBinary(row), serviceResult.getValue().getValue()); + } + try { + Message.Builder builder = responsePrototype.newBuilderForType(); + org.apache.hadoop.hbase.protobuf.ProtobufUtil.mergeFrom(builder, + serviceResult.getValue().getValue().toByteArray()); + callback.update(region, row, (R) builder.build()); + } catch (IOException e) { + LOG.error("Unexpected response type from endpoint {}", methodDescriptor.getFullName(), + e); + callbackErrorExceptions.add(e); + callbackErrorActions.add(execsByRow.get(row)); + callbackErrorServers.add("null"); + } + }; AsyncProcessTask task = - AsyncProcessTask.newBuilder(resultsCallback) - .setPool(context.wrap(pool)) - .setTableName(tableName) - .setRowAccess(execs) - .setResults(results) - .setRpcTimeout(readRpcTimeoutMs) - .setOperationTimeout(operationTimeoutMs) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL) - .build(); + AsyncProcessTask.newBuilder(resultsCallback).setPool(context.wrap(pool)) + .setTableName(tableName).setRowAccess(execs).setResults(results) + .setRpcTimeout(readRpcTimeoutMs).setOperationTimeout(operationTimeoutMs) + .setSubmittedRows(AsyncProcessTask.SubmittedRows.ALL).build(); AsyncRequestFuture future = asyncProcess.submit(task); future.waitUntilDone(); if (future.hasError()) { throw future.getErrors(); } else if (!callbackErrorExceptions.isEmpty()) { - throw new RetriesExhaustedWithDetailsException( - callbackErrorExceptions, callbackErrorActions, callbackErrorServers); + throw new RetriesExhaustedWithDetailsException(callbackErrorExceptions, + callbackErrorActions, callbackErrorServers); } }, supplier); } @@ -1434,8 +1358,8 @@ private class CheckAndMutateBuilderImpl implements CheckAndMutateBuilder { @Override public CheckAndMutateBuilder qualifier(byte[] qualifier) { - this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. 
Consider using" + - " an empty byte array, or just do not call this method if you want a null qualifier"); + this.qualifier = Preconditions.checkNotNull(qualifier, "qualifier is null. Consider using" + + " an empty byte array, or just do not call this method if you want a null qualifier"); return this; } @@ -1460,44 +1384,41 @@ public CheckAndMutateBuilder ifMatches(CompareOperator compareOp, byte[] value) } private void preCheck() { - Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" + - " calling ifNotExists/ifEquals/ifMatches before executing the request"); + Preconditions.checkNotNull(op, "condition is null. You need to specify the condition by" + + " calling ifNotExists/ifEquals/ifMatches before executing the request"); } @Override public boolean thenPut(Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { validatePut(put); preCheck(); return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, put) - .isSuccess(); + .isSuccess(); }, supplier); } @Override public boolean thenDelete(Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { preCheck(); return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, delete) - .isSuccess(); + .isSuccess(); }, supplier); } @Override public boolean thenMutate(RowMutations mutation) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { preCheck(); return doCheckAndMutate(row, family, qualifier, op, value, null, timeRange, mutation) - .isSuccess(); + .isSuccess(); }, supplier); } } @@ -1522,20 +1443,17 @@ public CheckAndMutateWithFilterBuilder timeRange(TimeRange timeRange) { @Override public boolean thenPut(Put put) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace(() -> { validatePut(put); - return doCheckAndMutate(row, null, null, null, null, filter, timeRange, put) - .isSuccess(); + return doCheckAndMutate(row, null, null, null, null, filter, timeRange, put).isSuccess(); }, supplier); } @Override public boolean thenDelete(Delete delete) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); return TraceUtil.trace( () -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, delete).isSuccess(), supplier); @@ -1544,12 +1462,11 @@ public boolean thenDelete(Delete delete) throws IOException { @Override public boolean 
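The CheckAndMutateBuilderImpl hunks above, and the thenMutate hunk that continues below, only touch formatting; client code still reaches this builder through Table.checkAndMutate(row, family). A minimal usage sketch, with table, row and column names chosen purely for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {
  public static void main(String[] args) throws Exception {
    byte[] row = Bytes.toBytes("row1");
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");
    try (Connection conn = ConnectionFactory.createConnection();
      Table table = conn.getTable(TableName.valueOf("t1"))) {
      // qualifier() rejects null (see the Preconditions message above), and one of
      // ifNotExists()/ifEquals()/ifMatches() must be called before thenPut/thenDelete/thenMutate,
      // otherwise preCheck() fails.
      boolean applied = table.checkAndMutate(row, cf)
        .qualifier(q)
        .ifEquals(Bytes.toBytes("v1"))
        .thenPut(new Put(row).addColumn(cf, q, Bytes.toBytes("v2")));
      System.out.println("applied=" + applied);
    }
  }
}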
thenMutate(RowMutations mutation) throws IOException { final Supplier supplier = new TableOperationSpanBuilder(connection) - .setTableName(tableName) - .setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); - return TraceUtil.trace( - () -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, mutation) - .isSuccess(), - supplier); + .setTableName(tableName).setOperation(HBaseSemanticAttributes.Operation.CHECK_AND_MUTATE); + return TraceUtil + .trace(() -> doCheckAndMutate(row, null, null, null, null, filter, timeRange, mutation) + .isSuccess(), + supplier); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java index 539b02dba3ad..a298a9bf3252 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,11 +40,12 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * HTableMultiplexer provides a thread-safe non blocking PUT API across all the tables. Each put * will be sharded into different buffer queues based on its destination region server. So each @@ -108,14 +107,13 @@ public HTableMultiplexer(Connection conn, Configuration conf, this.pool = HTable.getDefaultExecutor(conf); // how many times we could try in total, one more than retry number this.maxAttempts = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, - HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1; + HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER) + 1; this.perRegionServerBufferQueueSize = perRegionServerBufferQueueSize; this.maxKeyValueSize = HTable.getMaxKeyValueSize(conf); this.flushPeriod = conf.getLong(TABLE_MULTIPLEXER_FLUSH_PERIOD_MS, 100); int initThreads = conf.getInt(TABLE_MULTIPLEXER_INIT_THREADS, 10); - this.executor = - Executors.newScheduledThreadPool(initThreads, - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build()); + this.executor = Executors.newScheduledThreadPool(initThreads, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("HTableFlushWorker-%d").build()); this.workerConf = HBaseConfiguration.create(conf); // We do not do the retry because we need to reassign puts to different queues if regions are @@ -124,8 +122,8 @@ public HTableMultiplexer(Connection conn, Configuration conf, } /** - * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already - * been closed. + * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already been + * closed. * @throws IOException If there is an error closing the connection. 
*/ public synchronized void close() throws IOException { @@ -146,17 +144,16 @@ public boolean put(TableName tableName, final Put put) { } /** - * The puts request will be buffered by their corresponding buffer queue. - * Return the list of puts which could not be queued. + * The puts request will be buffered by their corresponding buffer queue. Return the list of puts + * which could not be queued. * @param tableName * @param puts * @return the list of puts which could not be queued */ public List put(TableName tableName, final List puts) { - if (puts == null) - return null; + if (puts == null) return null; - List failedPuts = null; + List failedPuts = null; boolean result; for (Put put : puts) { result = put(tableName, put, this.maxAttempts); @@ -183,8 +180,7 @@ public List put(byte[] tableName, final List puts) { /** * The put request will be buffered by its corresponding buffer queue. And the put request will be - * retried before dropping the request. - * Return false if the queue is already full. + * retried before dropping the request. Return false if the queue is already full. * @return true if the request can be accepted by its corresponding buffer queue. */ public boolean put(final TableName tableName, final Put put, int maxAttempts) { @@ -279,8 +275,7 @@ public static class HTableMultiplexerStatus { private Map serverToAverageLatencyMap; private Map serverToMaxLatencyMap; - public HTableMultiplexerStatus( - Map serverToFlushWorkerMap) { + public HTableMultiplexerStatus(Map serverToFlushWorkerMap) { this.totalBufferedPutCounter = 0; this.totalFailedPutCounter = 0; this.maxLatency = 0; @@ -292,16 +287,14 @@ public HTableMultiplexerStatus( this.initialize(serverToFlushWorkerMap); } - private void initialize( - Map serverToFlushWorkerMap) { + private void initialize(Map serverToFlushWorkerMap) { if (serverToFlushWorkerMap == null) { return; } long averageCalcSum = 0; int averageCalcCount = 0; - for (Map.Entry entry : serverToFlushWorkerMap - .entrySet()) { + for (Map.Entry entry : serverToFlushWorkerMap.entrySet()) { HRegionLocation addr = entry.getKey(); FlushWorker worker = entry.getValue(); @@ -310,8 +303,7 @@ private void initialize( long serverMaxLatency = worker.getMaxLatency(); AtomicAverageCounter averageCounter = worker.getAverageLatencyCounter(); // Get sum and count pieces separately to compute overall average - SimpleEntry averageComponents = averageCounter - .getComponents(); + SimpleEntry averageComponents = averageCounter.getComponents(); long serverAvgLatency = averageCounter.getAndReset(); this.totalBufferedPutCounter += bufferedCounter; @@ -322,19 +314,12 @@ private void initialize( averageCalcSum += averageComponents.getKey(); averageCalcCount += averageComponents.getValue(); - this.serverToBufferedCounterMap.put(addr.getHostnamePort(), - bufferedCounter); - this.serverToFailedCounterMap - .put(addr.getHostnamePort(), - failedCounter); - this.serverToAverageLatencyMap.put(addr.getHostnamePort(), - serverAvgLatency); - this.serverToMaxLatencyMap - .put(addr.getHostnamePort(), - serverMaxLatency); + this.serverToBufferedCounterMap.put(addr.getHostnamePort(), bufferedCounter); + this.serverToFailedCounterMap.put(addr.getHostnamePort(), failedCounter); + this.serverToAverageLatencyMap.put(addr.getHostnamePort(), serverAvgLatency); + this.serverToMaxLatencyMap.put(addr.getHostnamePort(), serverMaxLatency); } - this.overallAverageLatency = averageCalcCount != 0 ? averageCalcSum - / averageCalcCount : 0; + this.overallAverageLatency = averageCalcCount != 0 ? 
averageCalcSum / averageCalcCount : 0; } public long getTotalBufferedCounter() { @@ -441,6 +426,7 @@ static class FlushWorker implements Runnable { private final int writeRpcTimeout; // needed to pass in through AsyncProcess constructor private final int operationTimeout; private final ExecutorService pool; + public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation addr, HTableMultiplexer htableMultiplexer, int perRegionServerBufferQueueSize, ExecutorService pool, ScheduledExecutorService executor) { @@ -450,10 +436,9 @@ public FlushWorker(Configuration conf, ClusterConnection conn, HRegionLocation a RpcRetryingCallerFactory rpcCallerFactory = RpcRetryingCallerFactory.instantiate(conf); RpcControllerFactory rpcControllerFactory = RpcControllerFactory.instantiate(conf); this.writeRpcTimeout = conf.getInt(HConstants.HBASE_RPC_WRITE_TIMEOUT_KEY, - conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); + conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT)); this.operationTimeout = conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); this.ap = new AsyncProcess(conn, conf, rpcCallerFactory, rpcControllerFactory); this.executor = executor; this.maxRetryInQueue = conf.getInt(TABLE_MULTIPLEXER_MAX_RETRIES_IN_QUEUE, 10000); @@ -529,7 +514,7 @@ public void run() { @InterfaceAudience.Private long getNextDelay(int retryCount) { return ConnectionUtils.getPauseTime(multiplexer.flushPeriod, - multiplexer.maxAttempts - retryCount - 1); + multiplexer.maxAttempts - retryCount - 1); } @InterfaceAudience.Private @@ -588,22 +573,18 @@ public void run() { List failed = null; Object[] results = new Object[actions.size()]; ServerName server = addr.getServerName(); - Map actionsByServer = - Collections.singletonMap(server, actions); + Map actionsByServer = Collections.singletonMap(server, actions); try { - AsyncProcessTask task = AsyncProcessTask.newBuilder() - .setResults(results) - .setPool(pool) - .setRpcTimeout(writeRpcTimeout) - .setOperationTimeout(operationTimeout) - .build(); + AsyncProcessTask task = AsyncProcessTask.newBuilder().setResults(results).setPool(pool) + .setRpcTimeout(writeRpcTimeout).setOperationTimeout(operationTimeout).build(); AsyncRequestFuture arf = ap.submitMultiActions(task, retainedActions, 0L, null, null, actionsByServer); arf.waitUntilDone(); if (arf.hasError()) { // We just log and ignore the exception here since failed Puts will be resubmit again. 
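As a reminder of how the multiplexer in these hunks is driven from client code, here is a minimal sketch; the queue size and names are illustrative, and the status accessor name is recalled from the 2.x API rather than shown in this patch.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.HTableMultiplexer;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiplexerSketch {
  public static void main(String[] args) throws Exception {
    // 1000 is an illustrative per-regionserver buffer queue size.
    HTableMultiplexer multiplexer = new HTableMultiplexer(HBaseConfiguration.create(), 1000);
    Put put = new Put(Bytes.toBytes("row1"))
      .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    // Best effort: false means the per-regionserver queue was full and the Put was not accepted.
    boolean queued = multiplexer.put(TableName.valueOf("t1"), put);
    System.out.println("queued=" + queued + ", buffered="
      + multiplexer.getHTableMultiplexerStatus().getTotalBufferedCounter());
    multiplexer.close();
  }
}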
LOG.debug("Caught some exceptions when flushing puts to region server " - + addr.getHostnamePort(), arf.getErrors()); + + addr.getHostnamePort(), + arf.getErrors()); } } finally { for (int i = 0; i < results.length; i++) { @@ -636,9 +617,9 @@ public void run() { // Log some basic info if (LOG.isDebugEnabled()) { - LOG.debug("Processed " + currentProcessingCount + " put requests for " - + addr.getHostnamePort() + " and " + failedCount + " failed" - + ", latency for this send: " + elapsed); + LOG.debug( + "Processed " + currentProcessingCount + " put requests for " + addr.getHostnamePort() + + " and " + failedCount + " failed" + ", latency for this send: " + elapsed); } // Reset the current processing put count @@ -646,17 +627,17 @@ public void run() { } catch (RuntimeException e) { // To make findbugs happy // Log all the exceptions and move on - LOG.debug( - "Caught some exceptions " + e + " when flushing puts to region server " - + addr.getHostnamePort(), e); + LOG.debug("Caught some exceptions " + e + " when flushing puts to region server " + + addr.getHostnamePort(), + e); } catch (Exception e) { if (e instanceof InterruptedException) { Thread.currentThread().interrupt(); } // Log all the exceptions and move on - LOG.debug( - "Caught some exceptions " + e + " when flushing puts to region server " - + addr.getHostnamePort(), e); + LOG.debug("Caught some exceptions " + e + " when flushing puts to region server " + + addr.getHostnamePort(), + e); } finally { // Update the totalFailedCount this.totalFailedPutCount.addAndGet(failedCount); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java index 99befa4aa112..25c7c380de85 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Hbck.java @@ -34,11 +34,11 @@ /** * Hbck fixup tool APIs. Obtain an instance from {@link ClusterConnection#getHbck()} and call * {@link #close()} when done. - *
    WARNING: the below methods can damage the cluster. It may leave the cluster in an - * indeterminate state, e.g. region not assigned, or some hdfs files left behind. After running - * any of the below, operators may have to do some clean up on hdfs or schedule some assign - * procedures to get regions back online. DO AT YOUR OWN RISK. For experienced users only. - * + *
    + * WARNING: the below methods can damage the cluster. It may leave the cluster in an indeterminate + * state, e.g. region not assigned, or some hdfs files left behind. After running any of the below, + * operators may have to do some clean up on hdfs or schedule some assign procedures to get regions + * back online. DO AT YOUR OWN RISK. For experienced users only. * @see ConnectionFactory * @see ClusterConnection * @since 2.0.2, 2.1.1 @@ -46,8 +46,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.HBCK) public interface Hbck extends Abortable, Closeable { /** - * Update table state in Meta only. No procedures are submitted to open/assign or - * close/unassign regions of the table. + * Update table state in Meta only. No procedures are submitted to open/assign or close/unassign + * regions of the table. * @param state table state * @return previous state of the table in Meta */ @@ -59,21 +59,21 @@ public interface Hbck extends Abortable, Closeable { * @param nameOrEncodedName2State list of all region states to be updated in meta * @return previous state of the region in Meta */ - Map - setRegionStateInMeta(Map nameOrEncodedName2State) throws IOException; + Map setRegionStateInMeta( + Map nameOrEncodedName2State) throws IOException; /** - * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time - * -- good if many Regions to online -- and it will schedule the assigns even in the case where + * Like {@link Admin#assign(byte[])} but 'raw' in that it can do more than one Region at a time -- + * good if many Regions to online -- and it will schedule the assigns even in the case where * Master is initializing (as long as the ProcedureExecutor is up). Does NOT call Coprocessor * hooks. * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * bypassed. When a Procedure has been bypassed, a Procedure will have completed but no + * other Procedure will be able to make progress on the target entity (intentionally). + * This override flag will override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example of what a random + * user-space encoded Region name looks like. */ List assigns(List encodedRegionNames, boolean override) throws IOException; @@ -87,12 +87,12 @@ default List assigns(List encodedRegionNames) throws IOException { * case where Master is initializing (as long as the ProcedureExecutor is up). Does NOT call * Coprocessor hooks. * @param override You need to add the override for case where a region has previously been - * bypassed. When a Procedure has been bypassed, a Procedure will have completed - * but no other Procedure will be able to make progress on the target entity - * (intentionally). This override flag will override this fencing mechanism. - * @param encodedRegionNames Region encoded names; e.g. 
1588230740 is the hard-coded encoding - * for hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an - * example of what a random user-space encoded Region name looks like. + * bypassed. When a Procedure has been bypassed, a Procedure will have completed but no + * other Procedure will be able to make progress on the target entity (intentionally). + * This override flag will override this fencing mechanism. + * @param encodedRegionNames Region encoded names; e.g. 1588230740 is the hard-coded encoding for + * hbase:meta region and de00010733901a05f5a2a3a382e27dd4 is an example of what a random + * user-space encoded Region name looks like. */ List unassigns(List encodedRegionNames, boolean override) throws IOException; @@ -101,17 +101,16 @@ default List unassigns(List encodedRegionNames) throws IOException } /** - * Bypass specified procedure and move it to completion. Procedure is marked completed but - * no actual work is done from the current state/step onwards. Parents of the procedure are - * also marked for bypass. - * + * Bypass specified procedure and move it to completion. Procedure is marked completed but no + * actual work is done from the current state/step onwards. Parents of the procedure are also + * marked for bypass. * @param pids of procedures to complete. * @param waitTime wait time in ms for acquiring lock for a procedure * @param override if override set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during execution (bugs?). - * @param recursive If set, if a parent procedure, we will find and bypass children and then - * the parent procedure (Dangerous but useful in case where child procedure has been 'lost'). - * Does not always work. Experimental. + * This is for procedures which can't break out during execution (bugs?). + * @param recursive If set, if a parent procedure, we will find and bypass children and then the + * parent procedure (Dangerous but useful in case where child procedure has been 'lost'). + * Does not always work. Experimental. * @return true if procedure is marked for bypass successfully, false otherwise */ List bypassProcedure(List pids, long waitTime, boolean override, boolean recursive) @@ -125,7 +124,7 @@ List bypassProcedure(List pids, long waitTime, boolean override, default List scheduleServerCrashProcedure(List serverNames) throws IOException { return scheduleServerCrashProcedures( - serverNames.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList())); + serverNames.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList())); } List scheduleServerCrashProcedures(List serverNames) throws IOException; @@ -134,7 +133,6 @@ default List scheduleServerCrashProcedure(List ser /** * Request HBCK chore to run at master side. - * * @return true if HBCK chore ran, false if HBCK chore already running * @throws IOException if a remote or network exception occurs */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java index aa132afa7cce..4287e0227480 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHColumnDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
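Putting the Hbck hunks above in context, a minimal sketch of scheduling assigns through the fixup API; it assumes the caller can reach the internal ClusterConnection behind the client Connection, and the encoded region name is the illustrative one from the javadoc above. The same warning applies: for experienced operators only.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hbase.client.ClusterConnection;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Hbck;

public class HbckSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection();
      Hbck hbck = ((ClusterConnection) conn).getHbck()) {
      // Schedules assign procedures for regions stuck offline and returns their pids.
      // The encoded name below is the illustrative one from the javadoc above.
      List<Long> pids = hbck.assigns(Arrays.asList("de00010733901a05f5a2a3a382e27dd4"));
      System.out.println("assign pids=" + pids);
    }
  }
}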
See the NOTICE file * distributed with this work for additional information @@ -18,8 +18,8 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.HColumnDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor; +import org.apache.yetus.audience.InterfaceAudience; /** * Read-only column descriptor. @@ -36,8 +36,8 @@ public class ImmutableHColumnDescriptor extends HColumnDescriptor { } public ImmutableHColumnDescriptor(final ColumnFamilyDescriptor desc) { - super(desc instanceof ModifyableColumnFamilyDescriptor ? - (ModifyableColumnFamilyDescriptor) desc : new ModifyableColumnFamilyDescriptor(desc)); + super(desc instanceof ModifyableColumnFamilyDescriptor ? (ModifyableColumnFamilyDescriptor) desc + : new ModifyableColumnFamilyDescriptor(desc)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java index ed00ceedce52..e0ae2d54221f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHRegionInfo.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +29,6 @@ public class ImmutableHRegionInfo extends HRegionInfo { /* * Creates an immutable copy of an HRegionInfo. - * * @param other */ public ImmutableHRegionInfo(RegionInfo other) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java index 8539cef0a8ea..c706aaa25cc8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableHTableDescriptor.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,6 +39,7 @@ protected HColumnDescriptor toHColumnDescriptor(ColumnFamilyDescriptor desc) { return new ImmutableHColumnDescriptor(desc); } } + /* * Create an unmodifyable copy of an HTableDescriptor * @param desc @@ -49,8 +49,8 @@ public ImmutableHTableDescriptor(final HTableDescriptor desc) { } public ImmutableHTableDescriptor(final TableDescriptor desc) { - super(desc instanceof ModifyableTableDescriptor ? - (ModifyableTableDescriptor) desc : new ModifyableTableDescriptor(desc.getTableName(), desc)); + super(desc instanceof ModifyableTableDescriptor ? (ModifyableTableDescriptor) desc + : new ModifyableTableDescriptor(desc.getTableName(), desc)); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java index e0d4ad781209..0876ca4ac462 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -41,7 +39,6 @@ public final class ImmutableScan extends Scan { /** * Create Immutable instance of Scan from given Scan object - * * @param scan Copy all values from Scan */ public ImmutableScan(Scan scan) { @@ -77,7 +74,7 @@ public Scan setTimestamp(long timestamp) { @Override public Scan setColumnFamilyTimeRange(byte[] cf, long minStamp, long maxStamp) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setColumnFamilyTimeRange"); + "ImmutableScan does not allow access to setColumnFamilyTimeRange"); } @Override @@ -103,13 +100,13 @@ public Scan withStopRow(byte[] stopRow, boolean inclusive) { @Override public Scan setStartStopRowForPrefixScan(byte[] rowPrefix) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setStartStopRowForPrefixScan"); + "ImmutableScan does not allow access to setStartStopRowForPrefixScan"); } @Override public Scan readAllVersions() { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to readAllVersions"); + "ImmutableScan does not allow access to readAllVersions"); } @Override @@ -125,13 +122,13 @@ public Scan setBatch(int batch) { @Override public Scan setMaxResultsPerColumnFamily(int limit) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setMaxResultsPerColumnFamily"); + "ImmutableScan does not allow access to setMaxResultsPerColumnFamily"); } @Override public Scan setRowOffsetPerColumnFamily(int offset) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setRowOffsetPerColumnFamily"); + "ImmutableScan does not allow access to setRowOffsetPerColumnFamily"); } @Override @@ -142,7 +139,7 @@ public Scan setCaching(int caching) { @Override public Scan setMaxResultSize(long maxResultSize) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setMaxResultSize"); + "ImmutableScan does not allow access to setMaxResultSize"); } @Override @@ -158,7 +155,7 @@ public Scan setFamilyMap(Map> familyMap) { @Override public Scan setCacheBlocks(boolean cacheBlocks) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setCacheBlocks"); + "ImmutableScan does not allow access to setCacheBlocks"); } @Override @@ -169,13 +166,13 @@ public Scan setReversed(boolean reversed) { @Override public Scan setAllowPartialResults(final boolean allowPartialResults) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setAllowPartialResults"); + "ImmutableScan does not allow access to setAllowPartialResults"); } @Override public Scan setLoadColumnFamiliesOnDemand(boolean value) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setLoadColumnFamiliesOnDemand"); + "ImmutableScan does not allow access to setLoadColumnFamiliesOnDemand"); } @Override @@ -202,7 +199,7 @@ public Scan setId(String id) { @Override public Scan setAuthorizations(Authorizations authorizations) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setAuthorizations"); + "ImmutableScan does not allow access to setAuthorizations"); } @Override @@ -218,7 +215,7 @@ public Scan setACL(String user, Permission perms) { @Override public Scan 
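The ImmutableScan hunks in this file only re-indent the UnsupportedOperationException messages; the behaviour they describe looks like the following sketch (mutators rejected, reads backed by the wrapped Scan). The start row and caching value are illustrative.

import org.apache.hadoop.hbase.client.ImmutableScan;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ImmutableScanSketch {
  public static void main(String[] args) {
    Scan scan = new Scan().withStartRow(Bytes.toBytes("a")).setCaching(100);
    Scan readOnly = new ImmutableScan(scan);
    // Reads are served from the wrapped Scan ...
    System.out.println("caching=" + readOnly.getCaching());
    try {
      // ... while every mutator, like the ones in the hunks above, is rejected.
      readOnly.setMaxResultSize(1024);
    } catch (UnsupportedOperationException expected) {
      System.out.println(expected.getMessage());
    }
  }
}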
setConsistency(Consistency consistency) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setConsistency"); + "ImmutableScan does not allow access to setConsistency"); } @Override @@ -229,7 +226,7 @@ public Scan setReplicaId(int id) { @Override public Scan setIsolationLevel(IsolationLevel level) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setIsolationLevel"); + "ImmutableScan does not allow access to setIsolationLevel"); } @Override @@ -240,14 +237,14 @@ public Scan setPriority(int priority) { @Override public Scan setScanMetricsEnabled(final boolean enabled) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setScanMetricsEnabled"); + "ImmutableScan does not allow access to setScanMetricsEnabled"); } @Override @Deprecated public Scan setAsyncPrefetch(boolean asyncPrefetch) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setAsyncPrefetch"); + "ImmutableScan does not allow access to setAsyncPrefetch"); } @Override @@ -258,7 +255,7 @@ public Scan setLimit(int limit) { @Override public Scan setOneRowLimit() { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setOneRowLimit"); + "ImmutableScan does not allow access to setOneRowLimit"); } @Override @@ -269,19 +266,19 @@ public Scan setReadType(ReadType readType) { @Override Scan setMvccReadPoint(long mvccReadPoint) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setMvccReadPoint"); + "ImmutableScan does not allow access to setMvccReadPoint"); } @Override Scan resetMvccReadPoint() { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to resetMvccReadPoint"); + "ImmutableScan does not allow access to resetMvccReadPoint"); } @Override public Scan setNeedCursorResult(boolean needCursorResult) { throw new UnsupportedOperationException( - "ImmutableScan does not allow access to setNeedCursorResult"); + "ImmutableScan does not allow access to setNeedCursorResult"); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java index d7d11160a78e..0ee0a8b131ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,12 +36,12 @@ /** * Used to perform Increment operations on a single row. *
    - * This operation ensures atomicity to readers. Increments are done - * under a single row lock, so write operations to a row are synchronized, and - * readers are guaranteed to see this operation fully completed. + * This operation ensures atomicity to readers. Increments are done under a single row lock, so + * write operations to a row are synchronized, and readers are guaranteed to see this operation + * fully completed. *
    - * To increment columns of a row, instantiate an Increment object with the row - * to increment. At least one column to increment must be specified using the + * To increment columns of a row, instantiate an Increment object with the row to increment. At + * least one column to increment must be specified using the * {@link #addColumn(byte[], byte[], long)} method. */ @InterfaceAudience.Public @@ -56,7 +55,7 @@ public class Increment extends Mutation { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(byte [] row) { + public Increment(byte[] row) { this(row, 0, row.length); } @@ -66,10 +65,11 @@ public Increment(byte [] row) { * At least one column must be incremented. * @param row row key (we will make a copy of this). */ - public Increment(final byte [] row, final int offset, final int length) { + public Increment(final byte[] row, final int offset, final int length) { checkRow(row, offset, length); this.row = Bytes.copy(row, offset, length); } + /** * Copy constructor * @param incrementToCopy increment to copy @@ -80,14 +80,13 @@ public Increment(Increment incrementToCopy) { } /** - * Construct the Increment with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. + * Construct the Increment with user defined data. NOTED: 1) all cells in the familyMap must have + * the Type.Put 2) the row of each cell must be same with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Increment(byte[] row, long ts, NavigableMap> familyMap) { + public Increment(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -97,14 +96,14 @@ public Increment(byte[] row, long ts, NavigableMap> familyMa * @return this * @throws java.io.IOException e */ - public Increment add(Cell cell) throws IOException{ + public Increment add(Cell cell) throws IOException { super.add(cell); return this; } /** - * Increment the column from the specific family with the specified qualifier - * by the specified amount. + * Increment the column from the specific family with the specified qualifier by the specified + * amount. *
    * Overrides previous calls to addColumn for this family and qualifier. * @param family family name @@ -112,7 +111,7 @@ public Increment add(Cell cell) throws IOException{ * @param amount amount to increment by * @return the Increment object */ - public Increment addColumn(byte [] family, byte [] qualifier, long amount) { + public Increment addColumn(byte[] family, byte[] qualifier, long amount) { if (family == null) { throw new IllegalArgumentException("family cannot be null"); } @@ -133,12 +132,11 @@ public TimeRange getTimeRange() { /** * Sets the TimeRange to be used on the Get for this increment. *
    - * This is useful for when you have counters that only last for specific - * periods of time (ie. counters that are partitioned by time). By setting - * the range of valid times for this increment, you can potentially gain - * some performance with a more optimal Get operation. - * Be careful adding the time range to this class as you will update the old cell if the - * time range doesn't include the latest cells. + * This is useful for when you have counters that only last for specific periods of time (ie. + * counters that are partitioned by time). By setting the range of valid times for this increment, + * you can potentially gain some performance with a more optimal Get operation. Be careful adding + * the time range to this class as you will update the old cell if the time range doesn't include + * the latest cells. *
    * This range is used as [minStamp, maxStamp). * @param minStamp minimum timestamp value, inclusive @@ -146,8 +144,7 @@ public TimeRange getTimeRange() { * @throws IOException if invalid time range * @return this */ - public Increment setTimeRange(long minStamp, long maxStamp) - throws IOException { + public Increment setTimeRange(long minStamp, long maxStamp) throws IOException { tr = new TimeRange(minStamp, maxStamp); return this; } @@ -160,8 +157,8 @@ public Increment setTimestamp(long timestamp) { /** * @param returnResults True (default) if the increment operation should return the results. A - * client that is not interested in the result can save network bandwidth setting this - * to false. + * client that is not interested in the result can save network bandwidth setting this to + * false. */ @Override public Increment setReturnResults(boolean returnResults) { @@ -196,21 +193,20 @@ public boolean hasFamilies() { } /** - * Before 0.95, when you called Increment#getFamilyMap(), you got back - * a map of families to a list of Longs. Now, {@link #getFamilyCellMap()} returns - * families by list of Cells. This method has been added so you can have the - * old behavior. + * Before 0.95, when you called Increment#getFamilyMap(), you got back a map of families to a list + * of Longs. Now, {@link #getFamilyCellMap()} returns families by list of Cells. This method has + * been added so you can have the old behavior. * @return Map of families to a Map of qualifiers and their Long increments. * @since 0.95.0 */ - public Map> getFamilyMapOfLongs() { + public Map> getFamilyMapOfLongs() { NavigableMap> map = super.getFamilyCellMap(); - Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Map.Entry> entry: map.entrySet()) { - NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); - for (Cell cell: entry.getValue()) { + Map> results = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Map.Entry> entry : map.entrySet()) { + NavigableMap longs = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (Cell cell : entry.getValue()) { longs.put(CellUtil.cloneQualifier(cell), - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); } results.put(entry.getKey(), longs); } @@ -225,14 +221,14 @@ public String toString() { StringBuilder sb = new StringBuilder(); sb.append("row="); sb.append(Bytes.toStringBinary(this.row)); - if(this.familyMap.isEmpty()) { + if (this.familyMap.isEmpty()) { sb.append(", no columns set to be incremented"); return sb.toString(); } sb.append(", families="); boolean moreThanOne = false; - for(Map.Entry> entry: this.familyMap.entrySet()) { - if(moreThanOne) { + for (Map.Entry> entry : this.familyMap.entrySet()) { + if (moreThanOne) { sb.append("), "); } else { moreThanOne = true; @@ -241,19 +237,19 @@ public String toString() { sb.append("(family="); sb.append(Bytes.toString(entry.getKey())); sb.append(", columns="); - if(entry.getValue() == null) { + if (entry.getValue() == null) { sb.append("NONE"); } else { sb.append("{"); boolean moreThanOneB = false; - for(Cell cell : entry.getValue()) { - if(moreThanOneB) { + for (Cell cell : entry.getValue()) { + if (moreThanOneB) { sb.append(", "); } else { moreThanOneB = true; } - sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + - Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + sb.append(CellUtil.getCellKeyAsString(cell) + "+=" + + Bytes.toLong(cell.getValueArray(), 
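A minimal sketch of the Increment usage the javadoc above describes (the toString hunk continues below); the table, row and column names are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementSketch {
  public static void main(String[] args) throws Exception {
    byte[] cf = Bytes.toBytes("cf");
    byte[] hits = Bytes.toBytes("hits");
    try (Connection conn = ConnectionFactory.createConnection();
      Table table = conn.getTable(TableName.valueOf("counters"))) {
      // At least one column must be added before the Increment is sent.
      Increment inc = new Increment(Bytes.toBytes("row1")).addColumn(cf, hits, 1L);
      Result result = table.increment(inc);
      System.out.println("hits=" + Bytes.toLong(result.getValue(cf, hits)));
      // Shorthand for a single column, skipping the Increment object entirely.
      long next = table.incrementColumnValue(Bytes.toBytes("row1"), cf, hits, 1L);
      System.out.println("hits=" + next);
    }
  }
}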
cell.getValueOffset(), cell.getValueLength())); } sb.append("}"); } @@ -263,25 +259,24 @@ public String toString() { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * No replacement. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. No replacement. */ @Deprecated @Override public int hashCode() { - // TODO: This is wrong. Can't have two gets the same just because on same row. But it + // TODO: This is wrong. Can't have two gets the same just because on same row. But it // matches how equals works currently and gets rid of the findbugs warning. return Bytes.hashCode(this.getRow()); } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Row#COMPARATOR} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Row#COMPARATOR} instead */ @Deprecated @Override public boolean equals(Object obj) { - // TODO: This is wrong. Can't have two the same just because on same row. + // TODO: This is wrong. Can't have two the same just because on same row. if (this == obj) { return true; } @@ -293,7 +288,7 @@ public boolean equals(Object obj) { } @Override - protected long extraHeapSize(){ + protected long extraHeapSize() { return HEAP_OVERHEAD; } @@ -314,8 +309,8 @@ public Increment setDurability(Durability d) { /** * Method for setting the Increment's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Increment#Increment(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Increment#Increment(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java index ba7609087001..f77a13b2406b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/IsolationLevel.java @@ -1,6 +1,4 @@ /* - * Copyright The Apache Software Foundation - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; @@ -25,29 +22,27 @@ /** * Specify Isolation levels in Scan operations. *
    - * There are two isolation levels. A READ_COMMITTED isolation level - * indicates that only data that is committed be returned in a scan. - * An isolation level of READ_UNCOMMITTED indicates that a scan - * should return data that is being modified by transactions that might - * not have been committed yet. + * There are two isolation levels. A READ_COMMITTED isolation level indicates that only data that is + * committed be returned in a scan. An isolation level of READ_UNCOMMITTED indicates that a scan + * should return data that is being modified by transactions that might not have been committed yet. */ @InterfaceAudience.Public public enum IsolationLevel { - READ_COMMITTED(1), - READ_UNCOMMITTED(2); + READ_COMMITTED(1), READ_UNCOMMITTED(2); - IsolationLevel(int value) {} + IsolationLevel(int value) { + } - public byte [] toBytes() { - return new byte [] { toByte() }; + public byte[] toBytes() { + return new byte[] { toByte() }; } public byte toByte() { - return (byte)this.ordinal(); + return (byte) this.ordinal(); } - public static IsolationLevel fromBytes(byte [] bytes) { + public static IsolationLevel fromBytes(byte[] bytes) { return IsolationLevel.fromByte(bytes[0]); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java index b949f0e2ecb9..a14998a04daa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LockTimeoutException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /* - Thrown whenever we are not able to get the lock within the specified wait time. + * Thrown whenever we are not able to get the lock within the specified wait time. */ @InterfaceAudience.Public public class LockTimeoutException extends HBaseIOException { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java index 41f79cf8e813..807c7f1f435d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogEntry.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
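Tying the IsolationLevel javadoc above to client code, a small sketch; the scan itself is illustrative.

import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Scan;

public class IsolationLevelSketch {
  public static void main(String[] args) {
    // READ_COMMITTED (the default) only returns committed data; READ_UNCOMMITTED may also
    // return data from transactions that have not committed yet.
    Scan scan = new Scan().setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);
    System.out.println(scan.getIsolationLevel());
    // The level round-trips through a single byte, as toBytes()/fromBytes() above show.
    System.out.println(IsolationLevel.fromBytes(IsolationLevel.READ_COMMITTED.toBytes()));
  }
}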
*/ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Abstract response class representing online logs response from ring-buffer use-cases - * e.g slow/large RPC logs, balancer decision logs + * Abstract response class representing online logs response from ring-buffer use-cases e.g + * slow/large RPC logs, balancer decision logs */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java index 506fc4f76521..74db8afe4628 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/LogQueryFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -26,8 +24,8 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Slow/Large Log Query Filter with all filter and limit parameters - * Extends generic LogRequest used by Admin API getLogEntries + * Slow/Large Log Query Filter with all filter and limit parameters Extends generic LogRequest used + * by Admin API getLogEntries * @deprecated as of 2.4.0. Will be removed in 4.0.0. */ @InterfaceAudience.Public @@ -44,13 +42,11 @@ public class LogQueryFilter { private FilterByOperator filterByOperator = FilterByOperator.OR; public enum Type { - SLOW_LOG, - LARGE_LOG + SLOW_LOG, LARGE_LOG } public enum FilterByOperator { - AND, - OR + AND, OR } public String getRegionName() { @@ -121,41 +117,24 @@ public boolean equals(Object o) { LogQueryFilter that = (LogQueryFilter) o; - return new EqualsBuilder() - .append(limit, that.limit) - .append(regionName, that.regionName) - .append(clientAddress, that.clientAddress) - .append(tableName, that.tableName) - .append(userName, that.userName) - .append(type, that.type) - .append(filterByOperator, that.filterByOperator) - .isEquals(); + return new EqualsBuilder().append(limit, that.limit).append(regionName, that.regionName) + .append(clientAddress, that.clientAddress).append(tableName, that.tableName) + .append(userName, that.userName).append(type, that.type) + .append(filterByOperator, that.filterByOperator).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(regionName) - .append(clientAddress) - .append(tableName) - .append(userName) - .append(limit) - .append(type) - .append(filterByOperator) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(regionName).append(clientAddress).append(tableName) + .append(userName).append(limit).append(type).append(filterByOperator).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this) - .append("regionName", regionName) - .append("clientAddress", clientAddress) - .append("tableName", tableName) - .append("userName", userName) - .append("limit", limit) - .append("type", type) - .append("filterByOperator", filterByOperator) - .toString(); + return new ToStringBuilder(this).append("regionName", regionName) + .append("clientAddress", 
clientAddress).append("tableName", tableName) + .append("userName", userName).append("limit", limit).append("type", type) + .append("filterByOperator", filterByOperator).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java index 7ae97314c480..6659d10ec616 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.Closeable; @@ -29,16 +28,17 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** - * A RetryingCallable for Master RPC operations. - * Implement the #rpcCall method. It will be retried on error. See its javadoc and the javadoc of - * #call(int). See {@link HBaseAdmin} for examples of how this is used. To get at the - * rpcController that has been created and configured to make this rpc call, use getRpcController(). - * We are trying to contain all protobuf references including references to rpcController so we - * don't pollute codebase with protobuf references; keep the protobuf references contained and only - * present in a few classes rather than all about the code base. - *
    Like {@link RegionServerCallable} only in here, we can safely be PayloadCarryingRpcController - * all the time. This is not possible in the similar {@link RegionServerCallable} Callable because - * it has to deal with Coprocessor Endpoints. + * A RetryingCallable for Master RPC operations. Implement the #rpcCall method. It will be retried + * on error. See its javadoc and the javadoc of #call(int). See {@link HBaseAdmin} for examples of + * how this is used. To get at the rpcController that has been created and configured to make this + * rpc call, use getRpcController(). We are trying to contain all protobuf references including + * references to rpcController so we don't pollute codebase with protobuf references; keep the + * protobuf references contained and only present in a few classes rather than all about the code + * base. + *
    + * Like {@link RegionServerCallable} only in here, we can safely be PayloadCarryingRpcController all + * the time. This is not possible in the similar {@link RegionServerCallable} Callable because it + * has to deal with Coprocessor Endpoints. * @param return type */ @InterfaceAudience.Private @@ -81,10 +81,10 @@ public long sleep(long pause, int tries) { } /** - * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from {@link Exception} to - * {@link IOException}. It also does setup of an rpcController and calls through to the rpcCall() - * method which callers are expected to implement. If rpcController is an instance of - * PayloadCarryingRpcController, we will set a timeout on it. + * Override that changes the {@link java.util.concurrent.Callable#call()} Exception from + * {@link Exception} to {@link IOException}. It also does setup of an rpcController and calls + * through to the rpcCall() method which callers are expected to implement. If rpcController is an + * instance of PayloadCarryingRpcController, we will set a timeout on it. */ @Override // Same trick as in RegionServerCallable so users don't have to copy/paste so much boilerplate @@ -103,11 +103,11 @@ public V call(int callTimeout) throws IOException { } /** - * Run the RPC call. Implement this method. To get at the rpcController that has been created - * and configured to make this rpc call, use getRpcController(). We are trying to contain + * Run the RPC call. Implement this method. To get at the rpcController that has been created and + * configured to make this rpc call, use getRpcController(). We are trying to contain * rpcController references so we don't pollute codebase with protobuf references; keep the - * protobuf references contained and only present in a few classes rather than all about the - * code base. + * protobuf references contained and only present in a few classes rather than all about the code + * base. * @throws Exception */ protected abstract V rpcCall() throws Exception; @@ -138,7 +138,8 @@ void setPriority(final byte[] regionName) { } private static boolean isMetaRegion(final byte[] regionName) { - return Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || - Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); + return Bytes.equals(regionName, RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionName, + RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java index 9e68a16bc306..b8ffe054fa05 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterCoprocessorRpcChannelImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,9 +52,7 @@ private CompletableFuture rpcCall(MethodDescriptor method, Message requ CompletableFuture future = new CompletableFuture<>(); CoprocessorServiceRequest csr = CoprocessorRpcUtils.getCoprocessorServiceRequest(method, request); - stub.execMasterService( - controller, - csr, + stub.execMasterService(controller, csr, new org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback() { @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java index b1c37776f9e4..b8a957662b4b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterKeepAliveConnection.java @@ -1,6 +1,4 @@ -/** - * Copyright The Apache Software Foundation - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,22 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos; + /** - * A KeepAlive connection is not physically closed immediately after the close, - * but rather kept alive for a few minutes. It makes sense only if it is shared. - * - *
    This interface is implemented on a stub. It allows to have a #close function in a master - * client. - * - *
    This class is intended to be used internally by HBase classes that need to make invocations - * against the master on the MasterProtos.MasterService.BlockingInterface; but not by - * final user code. Hence it's package protected. + * A KeepAlive connection is not physically closed immediately after the close, but rather kept + * alive for a few minutes. It makes sense only if it is shared. + *
    + * This interface is implemented on a stub. It allows to have a #close function in a master client. + *
<p>
    + * This class is intended to be used internally by HBase classes that need to make invocations + * against the master on the MasterProtos.MasterService.BlockingInterface; but not by final user + * code. Hence it's package protected. */ @InterfaceAudience.Private interface MasterKeepAliveConnection extends MasterProtos.MasterService.BlockingInterface { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java index 05773d0b4195..1d8bdd70f646 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterRegistry.java @@ -57,16 +57,16 @@ public class MasterRegistry extends AbstractRpcBasedConnectionRegistry { /** Configuration key that controls the fan out of requests **/ public static final String MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY = - "hbase.client.master_registry.hedged.fanout"; + "hbase.client.master_registry.hedged.fanout"; public static final String MASTER_REGISTRY_INITIAL_REFRESH_DELAY_SECS = - "hbase.client.master_registry.initial_refresh_delay_secs"; + "hbase.client.master_registry.initial_refresh_delay_secs"; public static final String MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS = - "hbase.client.master_registry.refresh_interval_secs"; + "hbase.client.master_registry.refresh_interval_secs"; public static final String MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES = - "hbase.client.master_registry.min_secs_between_refreshes"; + "hbase.client.master_registry.min_secs_between_refreshes"; private static final String MASTER_ADDRS_CONF_SEPARATOR = ","; @@ -80,7 +80,7 @@ public static Set parseMasterAddrs(Configuration conf) throws Unknow String configuredMasters = getMasterAddr(conf); for (String masterAddr : configuredMasters.split(MASTER_ADDRS_CONF_SEPARATOR)) { HostAndPort masterHostPort = - HostAndPort.fromString(masterAddr.trim()).withDefaultPort(HConstants.DEFAULT_MASTER_PORT); + HostAndPort.fromString(masterAddr.trim()).withDefaultPort(HConstants.DEFAULT_MASTER_PORT); masterAddrs.add(ServerName.valueOf(masterHostPort.toString(), ServerName.NON_STARTCODE)); } Preconditions.checkArgument(!masterAddrs.isEmpty(), "At least one master address is needed"); @@ -91,7 +91,7 @@ public static Set parseMasterAddrs(Configuration conf) throws Unknow MasterRegistry(Configuration conf) throws IOException { super(conf, MASTER_REGISTRY_HEDGED_REQS_FANOUT_KEY, MASTER_REGISTRY_INITIAL_REFRESH_DELAY_SECS, - MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS, MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES); + MASTER_REGISTRY_PERIODIC_REFRESH_INTERVAL_SECS, MASTER_REGISTRY_MIN_SECS_BETWEEN_REFRESHES); connectionString = getConnectionString(conf); } @@ -131,16 +131,16 @@ public static String getMasterAddr(Configuration conf) throws UnknownHostExcepti private static Set transformServerNames(GetMastersResponse resp) { return resp.getMasterServersList().stream() - .map(s -> ProtobufUtil.toServerName(s.getServerName())).collect(Collectors.toSet()); + .map(s -> ProtobufUtil.toServerName(s.getServerName())).collect(Collectors.toSet()); } @RestrictedApi(explanation = "Should only be called in tests", link = "", - allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") + allowedOnPath = ".*/(.*/MasterRegistry.java|src/test/.*)") CompletableFuture> getMasters() { return this - . 
call( - (c, s, d) -> s.getMasters(c, GetMastersRequest.getDefaultInstance(), d), - r -> r.getMasterServersCount() != 0, "getMasters()") - .thenApply(MasterRegistry::transformServerNames); + . call( + (c, s, d) -> s.getMasters(c, GetMastersRequest.getDefaultInstance(), d), + r -> r.getMasterServersCount() != 0, "getMasters()") + .thenApply(MasterRegistry::transformServerNames); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java index 6d4b85cfc51e..127c22bd36af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MasterSwitchType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +16,13 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import org.apache.yetus.audience.InterfaceAudience; + /** * Represents the master switch type */ @InterfaceAudience.Public public enum MasterSwitchType { - SPLIT, - MERGE + SPLIT, MERGE } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java index fde2838acc26..222b10007d47 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetaCache.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.util.ConcurrentMapUtils.computeIfAbsent; @@ -26,7 +25,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.CopyOnWriteArraySet; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.RegionLocations; @@ -49,8 +47,8 @@ public class MetaCache { /** * Map of table to table {@link HRegionLocation}s. */ - private final ConcurrentMap> - cachedRegionLocations = new CopyOnWriteArrayMap<>(); + private final ConcurrentMap> cachedRegionLocations = + new CopyOnWriteArrayMap<>(); // The presence of a server in the map implies it's likely that there is an // entry in cachedRegionLocations that map to this server; but the absence @@ -66,14 +64,12 @@ public MetaCache(MetricsConnection metrics) { } /** - * Search the cache for a location that fits our table and row key. - * Return null if no suitable region is located. - * + * Search the cache for a location that fits our table and row key. Return null if no suitable + * region is located. * @return Null or region location found in cache. 
*/ - public RegionLocations getCachedLocation(final TableName tableName, final byte [] row) { - ConcurrentNavigableMap tableLocations = - getTableLocations(tableName); + public RegionLocations getCachedLocation(final TableName tableName, final byte[] row) { + ConcurrentNavigableMap tableLocations = getTableLocations(tableName); Entry e = tableLocations.floorEntry(row); if (e == null) { @@ -96,8 +92,8 @@ public RegionLocations getCachedLocation(final TableName tableName, final byte [ // coming in here. // 2. Even if META region comes in, its end key will be empty byte[] and so Bytes.equals(endKey, // HConstants.EMPTY_END_ROW) check itself will pass. - if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) || - Bytes.compareTo(endKey, 0, endKey.length, row, 0, row.length) > 0) { + if (Bytes.equals(endKey, HConstants.EMPTY_END_ROW) + || Bytes.compareTo(endKey, 0, endKey.length, row, 0, row.length) > 0) { if (metrics != null) metrics.incrMetaCacheHit(); return possibleRegion; } @@ -116,9 +112,9 @@ public RegionLocations getCachedLocation(final TableName tableName, final byte [ public void cacheLocation(final TableName tableName, final ServerName source, final HRegionLocation location) { assert source != null; - byte [] startKey = location.getRegion().getStartKey(); + byte[] startKey = location.getRegion().getStartKey(); ConcurrentMap tableLocations = getTableLocations(tableName); - RegionLocations locations = new RegionLocations(new HRegionLocation[] {location}) ; + RegionLocations locations = new RegionLocations(new HRegionLocation[] { location }); RegionLocations oldLocations = tableLocations.putIfAbsent(startKey, locations); boolean isNewCacheEntry = (oldLocations == null); if (isNewCacheEntry) { @@ -130,8 +126,8 @@ public void cacheLocation(final TableName tableName, final ServerName source, } // If the server in cache sends us a redirect, assume it's always valid. - HRegionLocation oldLocation = oldLocations.getRegionLocation( - location.getRegion().getReplicaId()); + HRegionLocation oldLocation = + oldLocations.getRegionLocation(location.getRegion().getReplicaId()); boolean force = oldLocation != null && oldLocation.getServerName() != null && oldLocation.getServerName().equals(source); @@ -156,7 +152,7 @@ public void cacheLocation(final TableName tableName, final ServerName source, * @param locations the new locations */ public void cacheLocation(final TableName tableName, final RegionLocations locations) { - byte [] startKey = locations.getRegionLocation().getRegion().getStartKey(); + byte[] startKey = locations.getRegionLocation().getRegion().getStartKey(); ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations oldLocation = tableLocations.putIfAbsent(startKey, locations); boolean isNewCacheEntry = (oldLocation == null); @@ -191,8 +187,8 @@ private void addToCachedServers(RegionLocations locations) { * @param tableName * @return Map of cached locations for passed tableName */ - private ConcurrentNavigableMap getTableLocations( - final TableName tableName) { + private ConcurrentNavigableMap + getTableLocations(final TableName tableName) { // find the map of cached locations for this table return computeIfAbsent(cachedRegionLocations, tableName, () -> new CopyOnWriteArrayMap<>(Bytes.BYTES_COMPARATOR)); @@ -210,8 +206,7 @@ public boolean isRegionCached(TableName tableName, final byte[] row) { } /** - * Return the number of cached region for a table. It will only be called - * from a unit test. + * Return the number of cached region for a table. 
It will only be called from a unit test. */ public int getNumberOfCachedRegionLocations(final TableName tableName) { Map tableLocs = this.cachedRegionLocations.get(tableName); @@ -244,13 +239,13 @@ public void clearCache(final ServerName serverName) { boolean deletedSomething = false; synchronized (this.cachedServers) { // We block here, because if there is an error on a server, it's likely that multiple - // threads will get the error simultaneously. If there are hundreds of thousand of - // region location to check, it's better to do this only once. A better pattern would - // be to check if the server is dead when we get the region location. + // threads will get the error simultaneously. If there are hundreds of thousand of + // region location to check, it's better to do this only once. A better pattern would + // be to check if the server is dead when we get the region location. if (!this.cachedServers.contains(serverName)) { return; } - for (ConcurrentMap tableLocations : cachedRegionLocations.values()){ + for (ConcurrentMap tableLocations : cachedRegionLocations.values()) { for (Entry e : tableLocations.entrySet()) { RegionLocations regionLocations = e.getValue(); if (regionLocations != null) { @@ -259,8 +254,8 @@ public void clearCache(final ServerName serverName) { if (updatedLocations.isEmpty()) { deletedSomething |= tableLocations.remove(e.getKey(), regionLocations); } else { - deletedSomething |= tableLocations.replace(e.getKey(), regionLocations, - updatedLocations); + deletedSomething |= + tableLocations.replace(e.getKey(), regionLocations, updatedLocations); } } } @@ -293,7 +288,7 @@ public void clearCache(final TableName tableName) { * @param tableName tableName * @param row */ - public void clearCache(final TableName tableName, final byte [] row) { + public void clearCache(final TableName tableName, final byte[] row) { ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations regionLocations = getCachedLocation(tableName, row); @@ -317,7 +312,7 @@ public void clearCache(final TableName tableName, final byte [] row) { * @param row row key * @param replicaId region replica id */ - public void clearCache(final TableName tableName, final byte [] row, int replicaId) { + public void clearCache(final TableName tableName, final byte[] row, int replicaId) { ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations regionLocations = getCachedLocation(tableName, row); @@ -348,7 +343,7 @@ public void clearCache(final TableName tableName, final byte [] row, int replica /** * Delete a cached location for a table, row and server */ - public void clearCache(final TableName tableName, final byte [] row, ServerName serverName) { + public void clearCache(final TableName tableName, final byte[] row, ServerName serverName) { ConcurrentMap tableLocations = getTableLocations(tableName); RegionLocations regionLocations = getCachedLocation(tableName, row); @@ -368,7 +363,7 @@ public void clearCache(final TableName tableName, final byte [] row, ServerName } if (LOG.isTraceEnabled()) { LOG.trace("Removed locations of table: " + tableName + " ,row: " + Bytes.toString(row) - + " mapping to server: " + serverName + " from cache"); + + " mapping to server: " + serverName + " from cache"); } } } @@ -420,7 +415,7 @@ public void clearCache(final HRegionLocation location) { removed = tableLocations.remove(location.getRegion().getStartKey(), regionLocations); } else { removed = tableLocations.replace(location.getRegion().getStartKey(), regionLocations, - 
updatedLocations); + updatedLocations); } if (removed) { if (metrics != null) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java index 8566ec551e72..7bb89c650b64 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,29 +26,27 @@ import com.codahale.metrics.MetricRegistry; import com.codahale.metrics.RatioGauge; import com.codahale.metrics.Timer; - import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; - import org.apache.hadoop.hbase.ServerName; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.protobuf.Descriptors.MethodDescriptor; import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; /** - * This class is for maintaining the various connection statistics and publishing them through - * the metrics interfaces. - * - * This class manages its own {@link MetricRegistry} and {@link JmxReporter} so as to not - * conflict with other uses of Yammer Metrics within the client application. Instantiating + * This class is for maintaining the various connection statistics and publishing them through the + * metrics interfaces. This class manages its own {@link MetricRegistry} and {@link JmxReporter} so + * as to not conflict with other uses of Yammer Metrics within the client application. Instantiating * this class implicitly creates and "starts" instances of these classes; be sure to call * {@link #shutdown()} to terminate the thread pools they allocate. 
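Note on the MetricsConnection javadoc reflowed above: it states that the class manages its own MetricRegistry and reporter, and that shutdown() must be called to stop the threads they allocate. Below is a minimal, hypothetical sketch of that general Dropwizard Metrics lifecycle, assuming only metrics-core on the classpath; ConsoleReporter stands in for the JmxReporter mentioned in the comment, and the class name and metric names are invented for illustration rather than taken from HBase.

import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import java.util.concurrent.TimeUnit;

public class OwnRegistryLifecycleSketch {
  public static void main(String[] args) throws InterruptedException {
    // A private registry keeps these metrics from colliding with other
    // Dropwizard/Yammer users in the same JVM.
    MetricRegistry registry = new MetricRegistry();
    ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
        .convertRatesTo(TimeUnit.SECONDS)
        .convertDurationsTo(TimeUnit.MILLISECONDS)
        .build();
    reporter.start(1, TimeUnit.SECONDS); // allocates a scheduled reporting thread

    // Record one timed "call" and one counter hit, loosely mirroring the
    // rpc timers and metaCacheHits counter registered in the hunk above.
    Timer rpcTimer = registry.timer(MetricRegistry.name("client", "rpcCallDurationMs", "Get"));
    try (Timer.Context ignored = rpcTimer.time()) {
      Thread.sleep(5); // stand-in for an RPC
    }
    registry.counter(MetricRegistry.name("client", "metaCacheHits")).inc();

    reporter.report(); // force one report before tearing down
    reporter.stop();   // analogous to MetricsConnection#shutdown(): stop what was allocated
  }
}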
*/ @@ -140,12 +138,10 @@ private CallTracker(MetricRegistry registry, String name, String subName, String sb.append("(").append(subName).append(")"); } this.name = sb.toString(); - this.callTimer = registry.timer(name(MetricsConnection.class, - DRTN_BASE + this.name, scope)); - this.reqHist = registry.histogram(name(MetricsConnection.class, - REQ_BASE + this.name, scope)); - this.respHist = registry.histogram(name(MetricsConnection.class, - RESP_BASE + this.name, scope)); + this.callTimer = registry.timer(name(MetricsConnection.class, DRTN_BASE + this.name, scope)); + this.reqHist = registry.histogram(name(MetricsConnection.class, REQ_BASE + this.name, scope)); + this.respHist = + registry.histogram(name(MetricsConnection.class, RESP_BASE + this.name, scope)); } private CallTracker(MetricRegistry registry, String name, String scope) { @@ -171,10 +167,10 @@ protected static class RegionStats { public RegionStats(MetricRegistry registry, String name) { this.name = name; - this.memstoreLoadHist = registry.histogram(name(MetricsConnection.class, - MEMLOAD_BASE + this.name)); - this.heapOccupancyHist = registry.histogram(name(MetricsConnection.class, - HEAP_BASE + this.name)); + this.memstoreLoadHist = + registry.histogram(name(MetricsConnection.class, MEMLOAD_BASE + this.name)); + this.heapOccupancyHist = + registry.histogram(name(MetricsConnection.class, HEAP_BASE + this.name)); } public void update(RegionLoadStats regionStatistics) { @@ -189,12 +185,10 @@ protected static class RunnerStats { final Histogram delayIntevalHist; public RunnerStats(MetricRegistry registry) { - this.normalRunners = registry.counter( - name(MetricsConnection.class, "normalRunnersCount")); - this.delayRunners = registry.counter( - name(MetricsConnection.class, "delayRunnersCount")); - this.delayIntevalHist = registry.histogram( - name(MetricsConnection.class, "delayIntervalHist")); + this.normalRunners = registry.counter(name(MetricsConnection.class, "normalRunnersCount")); + this.delayRunners = registry.counter(name(MetricsConnection.class, "delayRunnersCount")); + this.delayIntevalHist = + registry.histogram(name(MetricsConnection.class, "delayIntervalHist")); } public void incrNormalRunners() { @@ -210,11 +204,10 @@ public void updateDelayInterval(long interval) { } } - protected ConcurrentHashMap> serverStats - = new ConcurrentHashMap<>(); + protected ConcurrentHashMap> serverStats = + new ConcurrentHashMap<>(); - public void updateServerStats(ServerName serverName, byte[] regionName, - Object r) { + public void updateServerStats(ServerName serverName, byte[] regionName, Object r) { if (!(r instanceof Result)) { return; } @@ -255,19 +248,22 @@ private static interface NewMetric { private final String scope; private final NewMetric timerFactory = new NewMetric() { - @Override public Timer newMetric(Class clazz, String name, String scope) { + @Override + public Timer newMetric(Class clazz, String name, String scope) { return registry.timer(name(clazz, name, scope)); } }; private final NewMetric histogramFactory = new NewMetric() { - @Override public Histogram newMetric(Class clazz, String name, String scope) { + @Override + public Histogram newMetric(Class clazz, String name, String scope) { return registry.histogram(name(clazz, name, scope)); } }; private final NewMetric counterFactory = new NewMetric() { - @Override public Counter newMetric(Class clazz, String name, String scope) { + @Override + public Counter newMetric(Class clazz, String name, String scope) { return registry.counter(name(clazz, name, scope)); 
} }; @@ -300,46 +296,43 @@ private static interface NewMetric { // a big improvement over calling registry.newMetric each time. protected final ConcurrentMap rpcTimers = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcHistograms = - new ConcurrentHashMap<>(CAPACITY * 2 /* tracking both request and response sizes */, - LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcHistograms = new ConcurrentHashMap<>( + CAPACITY * 2 /* tracking both request and response sizes */, LOAD_FACTOR, CONCURRENCY_LEVEL); private final ConcurrentMap cacheDroppingExceptions = - new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); - protected final ConcurrentMap rpcCounters = + new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); + protected final ConcurrentMap rpcCounters = new ConcurrentHashMap<>(CAPACITY, LOAD_FACTOR, CONCURRENCY_LEVEL); MetricsConnection(String scope, Supplier batchPool, Supplier metaPool) { this.scope = scope; this.registry = new MetricRegistry(); - this.registry.register(getExecutorPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = batchPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); - this.registry.register(getMetaPoolName(), - new RatioGauge() { - @Override - protected Ratio getRatio() { - ThreadPoolExecutor pool = metaPool.get(); - if (pool == null) { - return Ratio.of(0, 0); - } - return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); - } - }); + this.registry.register(getExecutorPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = batchPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); + this.registry.register(getMetaPoolName(), new RatioGauge() { + @Override + protected Ratio getRatio() { + ThreadPoolExecutor pool = metaPool.get(); + if (pool == null) { + return Ratio.of(0, 0); + } + return Ratio.of(pool.getActiveCount(), pool.getMaximumPoolSize()); + } + }); this.metaCacheHits = registry.counter(name(this.getClass(), "metaCacheHits", scope)); this.metaCacheMisses = registry.counter(name(this.getClass(), "metaCacheMisses", scope)); - this.metaCacheNumClearServer = registry.counter(name(this.getClass(), - "metaCacheNumClearServer", scope)); - this.metaCacheNumClearRegion = registry.counter(name(this.getClass(), - "metaCacheNumClearRegion", scope)); + this.metaCacheNumClearServer = + registry.counter(name(this.getClass(), "metaCacheNumClearServer", scope)); + this.metaCacheNumClearRegion = + registry.counter(name(this.getClass(), "metaCacheNumClearRegion", scope)); this.hedgedReadOps = registry.counter(name(this.getClass(), "hedgedReadOps", scope)); this.hedgedReadWin = registry.counter(name(this.getClass(), "hedgedReadWin", scope)); this.getTracker = new CallTracker(this.registry, "Get", scope); @@ -350,10 +343,10 @@ protected Ratio getRatio() { this.putTracker = new CallTracker(this.registry, "Mutate", "Put", scope); this.multiTracker = new CallTracker(this.registry, "Multi", scope); this.runnerStats = new RunnerStats(this.registry); - this.concurrentCallsPerServerHist = registry.histogram(name(MetricsConnection.class, - "concurrentCallsPerServer", scope)); - this.numActionsPerServerHist = registry.histogram(name(MetricsConnection.class, - "numActionsPerServer", scope)); + 
this.concurrentCallsPerServerHist = + registry.histogram(name(MetricsConnection.class, "concurrentCallsPerServer", scope)); + this.numActionsPerServerHist = + registry.histogram(name(MetricsConnection.class, "numActionsPerServer", scope)); this.nsLookups = registry.counter(name(this.getClass(), NS_LOOKUPS, scope)); this.nsLookupsFailed = registry.counter(name(this.getClass(), NS_LOOKUPS_FAILED, scope)); @@ -438,8 +431,8 @@ private T getMetric(String key, ConcurrentMap map, NewMetric f /** Update call stats for non-critical-path methods */ private void updateRpcGeneric(String methodName, CallStats stats) { - getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory) - .update(stats.getCallTimeMs(), TimeUnit.MILLISECONDS); + getMetric(DRTN_BASE + methodName, rpcTimers, timerFactory).update(stats.getCallTimeMs(), + TimeUnit.MILLISECONDS); getMetric(REQ_BASE + methodName, rpcHistograms, histogramFactory) .update(stats.getRequestSizeBytes()); getMetric(RESP_BASE + methodName, rpcHistograms, histogramFactory) @@ -458,7 +451,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { // this implementation is tied directly to protobuf implementation details. would be better // if we could dispatch based on something static, ie, request Message type. if (method.getService() == ClientService.getDescriptor()) { - switch(method.getIndex()) { + switch (method.getIndex()) { case 0: assert "Get".equals(method.getName()); getTracker.updateRpc(stats); @@ -466,7 +459,7 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { case 1: assert "Mutate".equals(method.getName()); final MutationType mutationType = ((MutateRequest) param).getMutation().getMutateType(); - switch(mutationType) { + switch (mutationType) { case APPEND: appendTracker.updateRpc(stats); return; @@ -520,8 +513,8 @@ public void updateRpc(MethodDescriptor method, Message param, CallStats stats) { } public void incrCacheDroppingExceptions(Object exception) { - getMetric(CACHE_BASE + - (exception == null? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), + getMetric( + CACHE_BASE + (exception == null ? UNKNOWN_EXCEPTION : exception.getClass().getSimpleName()), cacheDroppingExceptions, counterFactory).inc(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java index 6ad44f08a60d..fc473bdbb709 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MobCompactPartitionPolicy.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java index 193402155150..360645b68763 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiAction.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,8 +29,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Container for Actions (i.e. Get, Delete, or Put), which are grouped by - * regionName. Intended to be used with {@link AsyncProcess}. + * Container for Actions (i.e. Get, Delete, or Put), which are grouped by regionName. Intended to be + * used with {@link AsyncProcess}. */ @InterfaceAudience.Private public final class MultiAction { @@ -48,7 +47,6 @@ public MultiAction() { /** * Get the total number of Actions - * * @return total number of Actions for all groups in this container. */ public int size() { @@ -60,10 +58,9 @@ public int size() { } /** - * Add an Action to this container based on it's regionName. If the regionName - * is wrong, the initial execution will fail, but will be automatically - * retried after looking up the correct region. - * + * Add an Action to this container based on it's regionName. If the regionName is wrong, the + * initial execution will fail, but will be automatically retried after looking up the correct + * region. * @param regionName * @param a */ @@ -72,14 +69,13 @@ public void add(byte[] regionName, Action a) { } /** - * Add an Action to this container based on it's regionName. If the regionName - * is wrong, the initial execution will fail, but will be automatically - * retried after looking up the correct region. - * + * Add an Action to this container based on it's regionName. If the regionName is wrong, the + * initial execution will fail, but will be automatically retried after looking up the correct + * region. * @param regionName * @param actionList list of actions to add for the region */ - public void add(byte[] regionName, List actionList){ + public void add(byte[] regionName, List actionList) { List rsActions = actions.get(regionName); if (rsActions == null) { rsActions = new ArrayList<>(actionList.size()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 03f168893a71..1117b3bcd952 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.HashMap; import java.util.Map; import java.util.TreeMap; - +import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -import org.apache.hadoop.hbase.util.Bytes; /** * A container for Result objects, grouped by regionName. @@ -37,11 +35,10 @@ public class MultiResponse extends AbstractResponse { private Map results = new TreeMap<>(Bytes.BYTES_COMPARATOR); /** - * The server can send us a failure for the region itself, instead of individual failure. - * It's a part of the protobuf definition. + * The server can send us a failure for the region itself, instead of individual failure. It's a + * part of the protobuf definition. 
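Note on the MultiResponse hunk above: it keeps per-region results and per-region exceptions in TreeMaps keyed by the region name bytes and constructed with Bytes.BYTES_COMPARATOR. The short sketch below illustrates why byte[] keys need that comparator (arrays have identity-based equality, so an unordered map keyed by byte[] would miss equal-but-distinct keys); it assumes hbase-common's Bytes utility on the classpath, and the class name and region name literal are made up for illustration.

import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.util.Bytes;

public class ByteArrayKeyedMapSketch {
  public static void main(String[] args) {
    // TreeMap + Bytes.BYTES_COMPARATOR compares key contents lexicographically,
    // mirroring how MultiResponse stores its per-region exceptions.
    Map<byte[], Throwable> exceptionsByRegion = new TreeMap<>(Bytes.BYTES_COMPARATOR);

    byte[] regionName = Bytes.toBytes("testtable,,1600000000000.abcdef.");
    exceptionsByRegion.put(regionName, new RuntimeException("region moved"));

    // A fresh array with the same contents still finds the entry; a HashMap
    // keyed by byte[] would return null here because byte[] equals() is identity-based.
    byte[] sameBytes = Bytes.toBytes("testtable,,1600000000000.abcdef.");
    System.out.println(exceptionsByRegion.get(sameBytes)); // java.lang.RuntimeException: region moved
  }
}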
*/ - private Map exceptions = - new TreeMap<>(Bytes.BYTES_COMPARATOR); + private Map exceptions = new TreeMap<>(Bytes.BYTES_COMPARATOR); public MultiResponse() { super(); @@ -52,7 +49,7 @@ public MultiResponse() { */ public int size() { int size = 0; - for (RegionResult result: results.values()) { + for (RegionResult result : results.values()) { size += result.size(); } return size; @@ -60,7 +57,6 @@ public int size() { /** * Add the pair to the container, grouped by the regionName - * * @param regionName * @param originalIndex the original index of the Action (request). * @param resOrEx the result or error; will be empty for successful Put and Delete actions. @@ -69,14 +65,14 @@ public void add(byte[] regionName, int originalIndex, Object resOrEx) { getResult(regionName).addResult(originalIndex, resOrEx); } - public void addException(byte []regionName, Throwable ie){ + public void addException(byte[] regionName, Throwable ie) { exceptions.put(regionName, ie); } /** * @return the exception for the region, if any. Null otherwise. */ - public Throwable getException(byte []regionName){ + public Throwable getException(byte[] regionName) { return exceptions.get(regionName); } @@ -88,7 +84,7 @@ public void addStatistic(byte[] regionName, ClientProtos.RegionLoadStats stat) { getResult(regionName).setStat(stat); } - private RegionResult getResult(byte[] region){ + private RegionResult getResult(byte[] region) { RegionResult rs = results.get(region); if (rs == null) { rs = new RegionResult(); @@ -97,7 +93,7 @@ private RegionResult getResult(byte[] region){ return rs; } - public Map getResults(){ + public Map getResults() { return this.results; } @@ -106,15 +102,15 @@ public ResponseType type() { return ResponseType.MULTI; } - static class RegionResult{ + static class RegionResult { Map result = new HashMap<>(); ClientProtos.RegionLoadStats stat; - public void addResult(int index, Object result){ + public void addResult(int index, Object result) { this.result.put(index, result); } - public void setStat(ClientProtos.RegionLoadStats stat){ + public void setStat(ClientProtos.RegionLoadStats stat) { this.stat = stat; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java index 4b4f08410a2c..de8c992d2092 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiServerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.CellScannable; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -31,18 +30,20 @@ import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; + import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter; import org.apache.hadoop.hbase.shaded.protobuf.ResponseConverter; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; /** - * Callable that handles the multi method call going against a single - * regionserver; i.e. A RegionServerCallable for the multi call (It is NOT a - * RegionServerCallable that goes against multiple regions). + * Callable that handles the multi method call going against a single regionserver; + * i.e. A RegionServerCallable for the multi call (It is NOT a RegionServerCallable that goes + * against multiple regions). * @param */ @InterfaceAudience.Private @@ -102,8 +103,8 @@ protected MultiResponse rpcCall() throws Exception { // is RowMutations/CheckAndMutate in the action list. Map indexMap = new HashMap<>(); // The multi object is a list of Actions by region. Iterate by region. - for (Map.Entry> e: this.multiAction.actions.entrySet()) { - final byte [] regionName = e.getKey(); + for (Map.Entry> e : this.multiAction.actions.entrySet()) { + final byte[] regionName = e.getKey(); final List actions = e.getValue(); if (this.cellBlock) { // Send data in cellblocks. @@ -132,8 +133,8 @@ protected MultiResponse rpcCall() throws Exception { } /** - * @return True if we should send data in cellblocks. This is an expensive call. Cache the - * result if you can rather than call each time. + * @return True if we should send data in cellblocks. This is an expensive call. Cache the result + * if you can rather than call each time. */ private boolean isCellBlock() { // This is not exact -- the configuration could have changed on us after connection was set up diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java index 0aa301c4c8cd..503de6994214 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MutableRegionInfo.java @@ -16,6 +16,7 @@ * limitations under the License. */ package org.apache.hadoop.hbase.client; + import java.util.Arrays; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellComparatorImpl; @@ -36,26 +37,16 @@ class MutableRegionInfo implements RegionInfo { private static final int MAX_REPLICA_ID = 0xFFFF; /** - * The new format for a region name contains its encodedName at the end. - * The encoded name also serves as the directory name for the region - * in the filesystem. - * - * New region name format: - * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. 
- * where, - * <encodedName> is a hex version of the MD5 hash of - * <tablename>,<startkey>,<regionIdTimestamp> - * - * The old region name format: - * <tablename>,<startkey>,<regionIdTimestamp> - * For region names in the old format, the encoded name is a 32-bit - * JenkinsHash integer value (in its decimal notation, string form). - *
<p>
    - * **NOTE** - * - * The first hbase:meta region, and regions created by an older - * version of HBase (0.20 or prior) will continue to use the - * old region name format. + * The new format for a region name contains its encodedName at the end. The encoded name also + * serves as the directory name for the region in the filesystem. New region name format: + * <tablename>,,<startkey>,<regionIdTimestamp>.<encodedName>. where, <encodedName> + * is a hex version of the MD5 hash of <tablename>,<startkey>,<regionIdTimestamp> The old + * region name format: <tablename>,<startkey>,<regionIdTimestamp> For region names in the + * old format, the encoded name is a 32-bit JenkinsHash integer value (in its decimal notation, + * string form). + *
<p>
    + * **NOTE** The first hbase:meta region, and regions created by an older version of HBase (0.20 or + * prior) will continue to use the old region name format. */ // This flag is in the parent of a split while the parent is still referenced by daughter @@ -76,8 +67,8 @@ class MutableRegionInfo implements RegionInfo { private final TableName tableName; private static int generateHashCode(final TableName tableName, final byte[] startKey, - final byte[] endKey, final long regionId, - final int replicaId, boolean offLine, byte[] regionName) { + final byte[] endKey, final long regionId, final int replicaId, boolean offLine, + byte[] regionName) { int result = Arrays.hashCode(regionName); result = (int) (result ^ regionId); result ^= Arrays.hashCode(checkStartKey(startKey)); @@ -89,11 +80,11 @@ private static int generateHashCode(final TableName tableName, final byte[] star } private static byte[] checkStartKey(byte[] startKey) { - return startKey == null? HConstants.EMPTY_START_ROW: startKey; + return startKey == null ? HConstants.EMPTY_START_ROW : startKey; } private static byte[] checkEndKey(byte[] endKey) { - return endKey == null? HConstants.EMPTY_END_ROW: endKey; + return endKey == null ? HConstants.EMPTY_END_ROW : endKey; } private static TableName checkTableName(TableName tableName) { @@ -115,7 +106,7 @@ private static int checkReplicaId(int regionId) { */ MutableRegionInfo(long regionId, TableName tableName, int replicaId) { this(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW, false, regionId, - replicaId, false); + replicaId, false); } MutableRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, @@ -145,11 +136,10 @@ public String getShortNameToLog() { /** @return the regionId */ @Override - public long getRegionId(){ + public long getRegionId() { return regionId; } - /** * @return the regionName as an array of bytes. * @see #getRegionNameAsString() @@ -200,25 +190,22 @@ public TableName getTable() { } /** - * Returns true if the given inclusive range of rows is fully contained - * by this region. For example, if the region is foo,a,g and this is - * passed ["b","c"] or ["a","c"] it will return true, but if this is passed - * ["b","z"] it will return false. + * Returns true if the given inclusive range of rows is fully contained by this region. For + * example, if the region is foo,a,g and this is passed ["b","c"] or ["a","c"] it will return + * true, but if this is passed ["b","z"] it will return false. * @throws IllegalArgumentException if the range passed is invalid (ie. 
end < start) */ @Override public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); if (cellComparator.compareRows(rangeStartKey, rangeEndKey) > 0) { - throw new IllegalArgumentException( - "Invalid range: " + Bytes.toStringBinary(rangeStartKey) + - " > " + Bytes.toStringBinary(rangeEndKey)); + throw new IllegalArgumentException("Invalid range: " + Bytes.toStringBinary(rangeStartKey) + + " > " + Bytes.toStringBinary(rangeEndKey)); } boolean firstKeyInRange = cellComparator.compareRows(rangeStartKey, startKey) >= 0; - boolean lastKeyInRange = - cellComparator.compareRows(rangeEndKey, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); + boolean lastKeyInRange = cellComparator.compareRows(rangeEndKey, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY); return firstKeyInRange && lastKeyInRange; } @@ -228,9 +215,9 @@ public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) { @Override public boolean containsRow(byte[] row) { CellComparator cellComparator = CellComparatorImpl.getCellComparator(tableName); - return cellComparator.compareRows(row, startKey) >= 0 && - (cellComparator.compareRows(row, endKey) < 0 || - Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); + return cellComparator.compareRows(row, startKey) >= 0 + && (cellComparator.compareRows(row, endKey) < 0 + || Bytes.equals(endKey, HConstants.EMPTY_BYTE_ARRAY)); } /** @return true if this region is a meta region */ @@ -268,8 +255,8 @@ public boolean isOffline() { } /** - * The parent of a region split is offline while split daughters hold - * references to the parent. Offlined regions are closed. + * The parent of a region split is offline while split daughters hold references to the parent. + * Offlined regions are closed. * @param offLine Set online/offline status. * @return MutableRegionInfo */ @@ -309,14 +296,11 @@ public int getReplicaId() { */ @Override public String toString() { - return "{ENCODED => " + getEncodedName() + ", " + - HConstants.NAME + " => '" + Bytes.toStringBinary(this.regionName) - + "', STARTKEY => '" + - Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + - Bytes.toStringBinary(this.endKey) + "'" + - (isOffline()? ", OFFLINE => true": "") + - (isSplit()? ", SPLIT => true": "") + - ((replicaId > 0)? ", REPLICA_ID => " + replicaId : "") + "}"; + return "{ENCODED => " + getEncodedName() + ", " + HConstants.NAME + " => '" + + Bytes.toStringBinary(this.regionName) + "', STARTKEY => '" + + Bytes.toStringBinary(this.startKey) + "', ENDKEY => '" + Bytes.toStringBinary(this.endKey) + + "'" + (isOffline() ? ", OFFLINE => true" : "") + (isSplit() ? ", SPLIT => true" : "") + + ((replicaId > 0) ? ", REPLICA_ID => " + replicaId : "") + "}"; } /** @@ -333,7 +317,7 @@ public boolean equals(Object o) { if (!(o instanceof RegionInfo)) { return false; } - return compareTo((RegionInfo)o) == 0; + return compareTo((RegionInfo) o) == 0; } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java index d49b7257e500..4e4124e6fac4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Mutation.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -62,24 +61,23 @@ import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; @InterfaceAudience.Public -public abstract class Mutation extends OperationWithAttributes implements Row, CellScannable, - HeapSize { +public abstract class Mutation extends OperationWithAttributes + implements Row, CellScannable, HeapSize { public static final long MUTATION_OVERHEAD = ClassSize.align( - // This - ClassSize.OBJECT + - // row + OperationWithAttributes.attributes - 2 * ClassSize.REFERENCE + - // Timestamp - 1 * Bytes.SIZEOF_LONG + - // durability - ClassSize.REFERENCE + - // familyMap - ClassSize.REFERENCE + - // familyMap - ClassSize.TREEMAP + - // priority - ClassSize.INTEGER - ); + // This + ClassSize.OBJECT + + // row + OperationWithAttributes.attributes + 2 * ClassSize.REFERENCE + + // Timestamp + 1 * Bytes.SIZEOF_LONG + + // durability + ClassSize.REFERENCE + + // familyMap + ClassSize.REFERENCE + + // familyMap + ClassSize.TREEMAP + + // priority + ClassSize.INTEGER); /** * The attribute for storing the list of clusters that have consumed the change. @@ -94,17 +92,16 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C private static final String RETURN_RESULTS = "_rr_"; // TODO: row should be final - protected byte [] row = null; + protected byte[] row = null; protected long ts = HConstants.LATEST_TIMESTAMP; protected Durability durability = Durability.USE_DEFAULT; // TODO: familyMap should be final // A Map sorted by column family. - protected NavigableMap> familyMap; + protected NavigableMap> familyMap; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected Mutation() { this.familyMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); @@ -114,10 +111,10 @@ protected Mutation(Mutation clone) { super(clone); this.row = clone.getRow(); this.ts = clone.getTimestamp(); - this.familyMap = clone.getFamilyCellMap().entrySet().stream(). - collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { - throw new RuntimeException("collisions!!!"); - }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); + this.familyMap = clone.getFamilyCellMap().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> new ArrayList<>(e.getValue()), (k, v) -> { + throw new RuntimeException("collisions!!!"); + }, () -> new TreeMap<>(Bytes.BYTES_COMPARATOR))); } /** @@ -126,7 +123,7 @@ protected Mutation(Mutation clone) { * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { + protected Mutation(byte[] row, long ts, NavigableMap> familyMap) { this.row = Preconditions.checkNotNull(row); if (row.length == 0) { throw new IllegalArgumentException("Row can't be empty"); @@ -141,9 +138,8 @@ public CellScanner cellScanner() { } /** - * Creates an empty list if one doesn't exist for the given column family - * or else it returns the associated list of Cell objects. - * + * Creates an empty list if one doesn't exist for the given column family or else it returns the + * associated list of Cell objects. * @param family column family * @return a list of Cell objects, returns an empty list if one doesn't exist. */ @@ -158,7 +154,6 @@ List getCellList(byte[] family) { /* * Create a KeyValue with this objects row key and the Put identifier. 
- * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] value) { @@ -181,20 +176,18 @@ KeyValue createPutKeyValue(byte[] family, byte[] qualifier, long ts, byte[] valu /* * Create a KeyValue with this objects row key and the Put identifier. - * * @return a KeyValue with this objects row key and the Put identifier. */ KeyValue createPutKeyValue(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value, Tag[] tags) { - return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, - family, 0, family == null ? 0 : family.length, - qualifier, ts, KeyValue.Type.Put, value, tags != null ? Arrays.asList(tags) : null); + return new KeyValue(this.row, 0, this.row == null ? 0 : this.row.length, family, 0, + family == null ? 0 : family.length, qualifier, ts, KeyValue.Type.Put, value, + tags != null ? Arrays.asList(tags) : null); } /** - * Compile the column family (i.e. schema) information - * into a Map. Useful for parsing and aggregation by debugging, - * logging, and administration tools. + * Compile the column family (i.e. schema) information into a Map. Useful for parsing and + * aggregation by debugging, logging, and administration tools. * @return Map */ @Override @@ -204,16 +197,16 @@ public Map getFingerprint() { // ideally, we would also include table information, but that information // is not stored in each Operation instance. map.put("families", families); - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { families.add(Bytes.toStringBinary(entry.getKey())); } return map; } /** - * Compile the details beyond the scope of getFingerprint (row, columns, - * timestamps, etc.) into a Map along with the fingerprinted information. - * Useful for debugging, logging, and administration tools. + * Compile the details beyond the scope of getFingerprint (row, columns, timestamps, etc.) into a + * Map along with the fingerprinted information. Useful for debugging, logging, and administration + * tools. 
* @param maxCols a limit on the number of columns output prior to truncation * @return Map */ @@ -228,7 +221,7 @@ public Map toMap(int maxCols) { map.put("row", Bytes.toStringBinary(this.row)); int colCount = 0; // iterate through all column families affected - for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { // map from this family to details for each cell affected within the family List> qualifierDetails = new ArrayList<>(); columns.put(Bytes.toStringBinary(entry.getKey()), qualifierDetails); @@ -237,7 +230,7 @@ public Map toMap(int maxCols) { continue; } // add details for each cell - for (Cell cell: entry.getValue()) { + for (Cell cell : entry.getValue()) { if (--maxCols <= 0) { continue; } @@ -262,16 +255,15 @@ public Map toMap(int maxCols) { private static Map cellToStringMap(Cell c) { Map stringMap = new HashMap<>(); - stringMap.put("qualifier", Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), - c.getQualifierLength())); + stringMap.put("qualifier", + Bytes.toStringBinary(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength())); stringMap.put("timestamp", c.getTimestamp()); stringMap.put("vlen", c.getValueLength()); List tags = PrivateCellUtil.getTags(c); if (tags != null) { List tagsString = new ArrayList<>(tags.size()); for (Tag t : tags) { - tagsString - .add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); + tagsString.add((t.getType()) + ":" + Bytes.toStringBinary(Tag.cloneValue(t))); } stringMap.put("tag", tagsString); } @@ -296,18 +288,18 @@ public Durability getDurability() { * Method for retrieving the put's familyMap * @return familyMap */ - public NavigableMap> getFamilyCellMap() { + public NavigableMap> getFamilyCellMap() { return this.familyMap; } /** * Method for setting the mutation's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Mutation#Mutation(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Mutation#Mutation(byte[], long, NavigableMap)} instead */ @Deprecated - public Mutation setFamilyCellMap(NavigableMap> map) { - // TODO: Shut this down or move it up to be a Constructor. Get new object rather than change + public Mutation setFamilyCellMap(NavigableMap> map) { + // TODO: Shut this down or move it up to be a Constructor. Get new object rather than change // this internal data member. this.familyMap = map; return this; @@ -326,13 +318,13 @@ public boolean isEmpty() { * @return row */ @Override - public byte [] getRow() { + public byte[] getRow() { return this.row; } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Row#COMPARATOR} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Row#COMPARATOR} instead */ @Deprecated @Override @@ -343,8 +335,8 @@ public int compareTo(final Row d) { /** * Method for retrieving the timestamp * @return timestamp - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #getTimestamp()} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link #getTimestamp()} instead */ @Deprecated public long getTimeStamp() { @@ -353,7 +345,6 @@ public long getTimeStamp() { /** * Method for retrieving the timestamp. 
- * * @return timestamp */ public long getTimestamp() { @@ -381,10 +372,10 @@ public Mutation setClusterIds(List clusterIds) { public List getClusterIds() { List clusterIds = new ArrayList<>(); byte[] bytes = getAttribute(CONSUMED_CLUSTER_IDS); - if(bytes != null) { + if (bytes != null) { ByteArrayDataInput in = ByteStreams.newDataInput(bytes); int numClusters = in.readInt(); - for(int i=0; i getClusterIds() { */ public Mutation setCellVisibility(CellVisibility expression) { this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, - toCellVisibility(expression).toByteArray()); + toCellVisibility(expression).toByteArray()); return this; } @@ -413,7 +404,6 @@ public CellVisibility getCellVisibility() throws DeserializationException { /** * Create a protocol buffer CellVisibility based on a client CellVisibility. - * * @param cellVisibility * @return a protocol buffer CellVisibility */ @@ -425,7 +415,6 @@ static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVisibilit /** * Convert a protocol buffer CellVisibility to a client CellVisibility - * * @param proto * @return the converted client CellVisibility */ @@ -436,12 +425,12 @@ private static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto /** * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * * @param protoBytes * @return the converted client CellVisibility * @throws DeserializationException */ - private static CellVisibility toCellVisibility(byte[] protoBytes) throws DeserializationException { + private static CellVisibility toCellVisibility(byte[] protoBytes) + throws DeserializationException { if (protoBytes == null) return null; ClientProtos.CellVisibility.Builder builder = ClientProtos.CellVisibility.newBuilder(); ClientProtos.CellVisibility proto = null; @@ -483,20 +472,17 @@ public long heapSize() { heapsize += ClassSize.align(ClassSize.ARRAY + this.row.length); // Adding map overhead - heapsize += - ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); - for(Map.Entry> entry : getFamilyCellMap().entrySet()) { - //Adding key overhead - heapsize += - ClassSize.align(ClassSize.ARRAY + entry.getKey().length); - - //This part is kinds tricky since the JVM can reuse references if you - //store the same value, but have a good match with SizeOf at the moment - //Adding value overhead + heapsize += ClassSize.align(getFamilyCellMap().size() * ClassSize.MAP_ENTRY); + for (Map.Entry> entry : getFamilyCellMap().entrySet()) { + // Adding key overhead + heapsize += ClassSize.align(ClassSize.ARRAY + entry.getKey().length); + + // This part is kinds tricky since the JVM can reuse references if you + // store the same value, but have a good match with SizeOf at the moment + // Adding value overhead heapsize += ClassSize.align(ClassSize.ARRAYLIST); int size = entry.getValue().size(); - heapsize += ClassSize.align(ClassSize.ARRAY + - size * ClassSize.REFERENCE); + heapsize += ClassSize.align(ClassSize.ARRAY + size * ClassSize.REFERENCE); for (Cell cell : entry.getValue()) { heapsize += cell.heapSize(); @@ -539,8 +525,8 @@ public Mutation setACL(Map perms) { /** * Return the TTL requested for the result of the mutation, in milliseconds. 
- * @return the TTL requested for the result of the mutation, in milliseconds, - * or Long.MAX_VALUE if unset + * @return the TTL requested for the result of the mutation, in milliseconds, or Long.MAX_VALUE if + * unset */ public long getTTL() { byte[] ttlBytes = getAttribute(OP_ATTRIBUTE_TTL); @@ -581,7 +567,7 @@ protected Mutation setReturnResults(boolean returnResults) { * Subclasses should override this method to add the heap size of their own fields. * @return the heap size to add (will be aligned). */ - protected long extraHeapSize(){ + protected long extraHeapSize() { return 0L; } @@ -597,76 +583,71 @@ public Mutation setTimestamp(long timestamp) { } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family & qualifier. - * Both given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family & qualifier. Both given arguments must match the KeyValue object to return + * true. * @param family column family * @param qualifier column qualifier - * @return returns true if the given family and qualifier already has an - * existing KeyValue object in the family map. + * @return returns true if the given family and qualifier already has an existing KeyValue object + * in the family map. */ - public boolean has(byte [] family, byte [] qualifier) { + public boolean has(byte[] family, byte[] qualifier) { return has(family, qualifier, this.ts, HConstants.EMPTY_BYTE_ARRAY, true, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. * @param family column family * @param qualifier column qualifier * @param ts timestamp - * @return returns true if the given family, qualifier and timestamp already has an - * existing KeyValue object in the family map. + * @return returns true if the given family, qualifier and timestamp already has an existing + * KeyValue object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, long ts) { + public boolean has(byte[] family, byte[] qualifier, long ts) { return has(family, qualifier, ts, HConstants.EMPTY_BYTE_ARRAY, false, true); } /** - * A convenience method to determine if this object's familyMap contains - * a value assigned to the given family, qualifier and timestamp. - * All 3 given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains a value assigned to the + * given family, qualifier and timestamp. All 3 given arguments must match the KeyValue object to + * return true. * @param family column family * @param qualifier column qualifier * @param value value to check - * @return returns true if the given family, qualifier and value already has an - * existing KeyValue object in the family map. + * @return returns true if the given family, qualifier and value already has an existing KeyValue + * object in the family map. 
*/ - public boolean has(byte [] family, byte [] qualifier, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, byte[] value) { return has(family, qualifier, this.ts, value, true, false); } /** - * A convenience method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp. - * All 4 given arguments must match the KeyValue object to return true. - * + * A convenience method to determine if this object's familyMap contains the given value assigned + * to the given family, qualifier and timestamp. All 4 given arguments must match the KeyValue + * object to return true. * @param family column family * @param qualifier column qualifier * @param ts timestamp * @param value value to check - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * @return returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. */ - public boolean has(byte [] family, byte [] qualifier, long ts, byte [] value) { + public boolean has(byte[] family, byte[] qualifier, long ts, byte[] value) { return has(family, qualifier, ts, value, false, false); } /** * Returns a list of all KeyValue objects with matching column family and qualifier. - * * @param family column family * @param qualifier column qualifier - * @return a list of KeyValue objects with the matching family and qualifier, - * returns an empty list if one doesn't exist for the given family. + * @return a list of KeyValue objects with the matching family and qualifier, returns an empty + * list if one doesn't exist for the given family. */ public List get(byte[] family, byte[] qualifier) { List filteredList = new ArrayList<>(); - for (Cell cell: getCellList(family)) { + for (Cell cell : getCellList(family)) { if (CellUtil.matchingQualifier(cell, qualifier)) { filteredList.add(cell); } @@ -675,21 +656,19 @@ public List get(byte[] family, byte[] qualifier) { } /* - * Private method to determine if this object's familyMap contains - * the given value assigned to the given family, qualifier and timestamp - * respecting the 2 boolean arguments - * + * Private method to determine if this object's familyMap contains the given value assigned to the + * given family, qualifier and timestamp respecting the 2 boolean arguments * @param family * @param qualifier * @param ts * @param value * @param ignoreTS * @param ignoreValue - * @return returns true if the given family, qualifier timestamp and value - * already has an existing KeyValue object in the family map. + * @return returns true if the given family, qualifier timestamp and value already has an existing + * KeyValue object in the family map. 
*/ - protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, - boolean ignoreTS, boolean ignoreValue) { + protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, boolean ignoreTS, + boolean ignoreValue) { List list = getCellList(family); if (list.isEmpty()) { return false; @@ -701,10 +680,8 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, // F F => 1 if (!ignoreTS && !ignoreValue) { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier) && - CellUtil.matchingValue(cell, value) && - cell.getTimestamp() == ts) { + if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier) + && CellUtil.matchingValue(cell, value) && cell.getTimestamp() == ts) { return true; } } @@ -724,8 +701,7 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, } } else { for (Cell cell : list) { - if (CellUtil.matchingFamily(cell, family) && - CellUtil.matchingQualifier(cell, qualifier)) { + if (CellUtil.matchingFamily(cell, family) && CellUtil.matchingQualifier(cell, qualifier)) { return true; } } @@ -735,23 +711,23 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, /** * @param row Row to check - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @throws IllegalArgumentException Thrown if row is empty or null or > + * {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row) { - return checkRow(row, 0, row == null? 0: row.length); + static byte[] checkRow(final byte[] row) { + return checkRow(row, 0, row == null ? 0 : row.length); } /** * @param row Row to check * @param offset * @param length - * @throws IllegalArgumentException Thrown if row is empty or null or - * > {@link HConstants#MAX_ROW_LENGTH} + * @throws IllegalArgumentException Thrown if row is empty or null or > + * {@link HConstants#MAX_ROW_LENGTH} * @return row */ - static byte [] checkRow(final byte [] row, final int offset, final int length) { + static byte[] checkRow(final byte[] row, final int offset, final int length) { if (row == null) { throw new IllegalArgumentException("Row buffer is null"); } @@ -759,8 +735,8 @@ protected boolean has(byte[] family, byte[] qualifier, long ts, byte[] value, throw new IllegalArgumentException("Row length is 0"); } if (length > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + length + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + length + " is > " + HConstants.MAX_ROW_LENGTH); } return row; } @@ -773,18 +749,18 @@ static void checkRow(ByteBuffer row) { throw new IllegalArgumentException("Row length is 0"); } if (row.remaining() > HConstants.MAX_ROW_LENGTH) { - throw new IllegalArgumentException("Row length " + row.remaining() + " is > " + - HConstants.MAX_ROW_LENGTH); + throw new IllegalArgumentException( + "Row length " + row.remaining() + " is > " + HConstants.MAX_ROW_LENGTH); } } Mutation add(Cell cell) throws IOException { - //Checking that the row of the kv is the same as the mutation + // Checking that the row of the kv is the same as the mutation // TODO: It is fraught with risk if user pass the wrong row. // Throwing the IllegalArgumentException is more suitable I'd say. 
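For orientation, the has(...) overloads, getTTL() and setCellVisibility(...) touched in this hunk are all reachable through the public client API. A minimal sketch, assuming only the standard hbase-client dependency; the row, family, qualifier and label names are made up:

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.security.visibility.CellVisibility;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationConvenienceSketch {
  public static void main(String[] args) throws Exception {
    byte[] cf = Bytes.toBytes("cf");
    byte[] q = Bytes.toBytes("q");

    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(cf, q, 42L, Bytes.toBytes("v"));

    // The has(...) overloads only report true when every supplied argument matches.
    System.out.println(put.has(cf, q));                          // true: family + qualifier match
    System.out.println(put.has(cf, q, 42L));                     // true: timestamp matches too
    System.out.println(put.has(cf, q, Bytes.toBytes("other")));  // false: value differs

    // TTL and visibility ride along as operation attributes on the Mutation.
    put.setTTL(3_600_000L);                                      // result of the mutation expires after 1h
    put.setCellVisibility(new CellVisibility("secret|admin"));   // visibility label expression
    System.out.println(put.getTTL());                            // 3600000
  }
}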
if (!CellUtil.matchingRows(cell, this.row)) { - throw new WrongRowIOException("The row in " + cell.toString() + - " doesn't match the original one " + Bytes.toStringBinary(this.row)); + throw new WrongRowIOException("The row in " + cell.toString() + + " doesn't match the original one " + Bytes.toStringBinary(this.row)); } byte[] family; @@ -808,11 +784,10 @@ Mutation add(Cell cell) throws IOException { } private static final class CellWrapper implements ExtendedCell { - private static final long FIXED_OVERHEAD = ClassSize.align( - ClassSize.OBJECT // object header - + KeyValue.TIMESTAMP_SIZE // timestamp - + Bytes.SIZEOF_LONG // sequence id - + 1 * ClassSize.REFERENCE); // references to cell + private static final long FIXED_OVERHEAD = ClassSize.align(ClassSize.OBJECT // object header + + KeyValue.TIMESTAMP_SIZE // timestamp + + Bytes.SIZEOF_LONG // sequence id + + 1 * ClassSize.REFERENCE); // references to cell private final Cell cell; private long sequenceId; private long timestamp; @@ -945,22 +920,19 @@ public byte[] cloneTags() { } private long heapOverhead() { - return FIXED_OVERHEAD - + ClassSize.ARRAY // row - + getFamilyLength() == 0 ? 0 : ClassSize.ARRAY - + getQualifierLength() == 0 ? 0 : ClassSize.ARRAY - + getValueLength() == 0 ? 0 : ClassSize.ARRAY - + getTagsLength() == 0 ? 0 : ClassSize.ARRAY; + return FIXED_OVERHEAD + ClassSize.ARRAY // row + + getFamilyLength() == 0 + ? 0 + : ClassSize.ARRAY + getQualifierLength() == 0 ? 0 + : ClassSize.ARRAY + getValueLength() == 0 ? 0 + : ClassSize.ARRAY + getTagsLength() == 0 ? 0 : ClassSize.ARRAY; } @Override public long heapSize() { - return heapOverhead() - + ClassSize.align(getRowLength()) - + ClassSize.align(getFamilyLength()) - + ClassSize.align(getQualifierLength()) - + ClassSize.align(getValueLength()) - + ClassSize.align(getTagsLength()); + return heapOverhead() + ClassSize.align(getRowLength()) + ClassSize.align(getFamilyLength()) + + ClassSize.align(getQualifierLength()) + ClassSize.align(getValueLength()) + + ClassSize.align(getTagsLength()); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java index 2bae4436383e..c75f9020da73 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryableCallerInterceptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,16 +18,14 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.PreemptiveFastFailException; +import org.apache.yetus.audience.InterfaceAudience; /** - * Class that acts as a NoOpInterceptor. This class is used in case the - * RetryingCallerInterceptor was not configured correctly or an - * RetryingCallerInterceptor was never configured in the first place. - * + * Class that acts as a NoOpInterceptor. This class is used in case the RetryingCallerInterceptor + * was not configured correctly or an RetryingCallerInterceptor was never configured in the first + * place. 
*/ @InterfaceAudience.Private class NoOpRetryableCallerInterceptor extends RetryingCallerInterceptor { @@ -42,14 +40,13 @@ public NoOpRetryableCallerInterceptor(Configuration conf) { } @Override - public void intercept( - RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) + public void intercept(RetryingCallerInterceptorContext abstractRetryingCallerInterceptorContext) throws PreemptiveFastFailException { } @Override - public void handleFailure(RetryingCallerInterceptorContext context, - Throwable t) throws IOException { + public void handleFailure(RetryingCallerInterceptorContext context, Throwable t) + throws IOException { } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java index c726ccda4c72..80c2274223c4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoOpRetryingInterceptorContext.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java index 184f0c0bc0f4..6d5d94802d0e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java index 70fa36a5afa6..3020be221059 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,9 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * NonceGenerator interface. - * In general, nonce group is an ID (one per client, or region+client, or whatever) that - * could be used to reduce collision potential, or be used by compatible server nonce manager - * to optimize nonce storage and removal. See HBASE-3787. + * NonceGenerator interface. In general, nonce group is an ID (one per client, or region+client, or + * whatever) that could be used to reduce collision potential, or be used by compatible server nonce + * manager to optimize nonce storage and removal. See HBASE-3787. 
*/ @InterfaceAudience.Private public interface NonceGenerator { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java index 2da8422f4832..663a0bb3a697 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoncedRegionServerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,26 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.ipc.HBaseRpcController; +import org.apache.yetus.audience.InterfaceAudience; /** - * Implementations make an rpc call against a RegionService via a protobuf Service. - * Implement #rpcCall(RpcController) and then call {@link #call(int)} to - * trigger the rpc. The {@link #call(int)} eventually invokes your - * #rpcCall(RpcController) meanwhile saving you having to write a bunch of - * boilerplate. The {@link #call(int)} implementation is from {@link RpcRetryingCaller} so rpcs are - * retried on fail. - * - *

    TODO: this class is actually tied to one region, because most of the paths make use of - * the regioninfo part of location when building requests. The only reason it works for - * multi-region requests (e.g. batch) is that they happen to not use the region parts. - * This could be done cleaner (e.g. having a generic parameter and 2 derived classes, - * RegionCallable and actual RegionServerCallable with ServerName. + * Implementations make an rpc call against a RegionService via a protobuf Service. Implement + * #rpcCall(RpcController) and then call {@link #call(int)} to trigger the rpc. The + * {@link #call(int)} eventually invokes your #rpcCall(RpcController) meanwhile saving you having to + * write a bunch of boilerplate. The {@link #call(int)} implementation is from + * {@link RpcRetryingCaller} so rpcs are retried on fail. + *

    + * TODO: this class is actually tied to one region, because most of the paths make use of the + * regioninfo part of location when building requests. The only reason it works for multi-region + * requests (e.g. batch) is that they happen to not use the region parts. This could be done cleaner + * (e.g. having a generic parameter and 2 derived classes, RegionCallable and actual + * RegionServerCallable with ServerName. * @param the class that the ServerCallable handles */ @InterfaceAudience.Private @@ -46,7 +44,7 @@ public abstract class NoncedRegionServerCallable extends ClientServiceCallabl * @param tableName Table name to which row belongs. * @param row The row we want in tableName. */ - public NoncedRegionServerCallable(Connection connection, TableName tableName, byte [] row, + public NoncedRegionServerCallable(Connection connection, TableName tableName, byte[] row, HBaseRpcController rpcController, int priority) { super(connection, tableName, row, rpcController, priority); this.nonce = getConnection().getNonceGenerator().newNonce(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java index 982ec5b0065b..5aeb1663694a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NormalizeTableFilterParams.java @@ -24,25 +24,16 @@ /** * A collection of criteria used for table selection. The logic of table selection is as follows: *

 * <ul>
- *   <li>
- *     When no parameter values are provided, an unfiltered list of all user tables is returned.
- *   </li>
- *   <li>
- *     When a list of {@link TableName TableNames} are provided, the filter starts with any of
- *     these tables that exist.
- *   </li>
- *   <li>
- *     When a {@code namespace} name is provided, the filter starts with all the tables present in
- *     that namespace.
- *   </li>
- *   <li>
- *     If both a list of {@link TableName TableNames} and a {@code namespace} name are provided,
- *     the {@link TableName} list is honored and the {@code namespace} name is ignored.
- *   </li>
- *   <li>
- *     If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
- *     reduced to those that match the provided regular expression.
- *   </li>
+ * <li>When no parameter values are provided, an unfiltered list of all user tables is returned.
+ * </li>
+ * <li>When a list of {@link TableName TableNames} are provided, the filter starts with any of these
+ * tables that exist.</li>
+ * <li>When a {@code namespace} name is provided, the filter starts with all the tables present in
+ * that namespace.</li>
+ * <li>If both a list of {@link TableName TableNames} and a {@code namespace} name are provided, the
+ * {@link TableName} list is honored and the {@code namespace} name is ignored.</li>
+ * <li>If a {@code regex} is provided, this subset of {@link TableName TableNames} is further
+ * reduced to those that match the provided regular expression.</li>
 * </ul>
    */ @InterfaceAudience.Public @@ -52,7 +43,7 @@ public final class NormalizeTableFilterParams { private final String namespace; private NormalizeTableFilterParams(final List tableNames, final String regex, - final String namespace) { + final String namespace) { this.tableNames = tableNames; this.regex = regex; this.namespace = namespace; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java index 115e55f336f6..71e200cf2d48 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OnlineLogRecord.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -40,23 +38,22 @@ final public class OnlineLogRecord extends LogEntry { // used to convert object to pretty printed format // used by toJsonPrettyPrint() - private static final Gson GSON = GsonUtil.createGson() - .setPrettyPrinting() - .registerTypeAdapter(OnlineLogRecord.class, (JsonSerializer) - (slowLogPayload, type, jsonSerializationContext) -> { - Gson gson = new Gson(); - JsonObject jsonObj = (JsonObject) gson.toJsonTree(slowLogPayload); - if (slowLogPayload.getMultiGetsCount() == 0) { - jsonObj.remove("multiGetsCount"); - } - if (slowLogPayload.getMultiMutationsCount() == 0) { - jsonObj.remove("multiMutationsCount"); - } - if (slowLogPayload.getMultiServiceCalls() == 0) { - jsonObj.remove("multiServiceCalls"); - } - return jsonObj; - }).create(); + private static final Gson GSON = + GsonUtil.createGson().setPrettyPrinting().registerTypeAdapter(OnlineLogRecord.class, + (JsonSerializer) (slowLogPayload, type, jsonSerializationContext) -> { + Gson gson = new Gson(); + JsonObject jsonObj = (JsonObject) gson.toJsonTree(slowLogPayload); + if (slowLogPayload.getMultiGetsCount() == 0) { + jsonObj.remove("multiGetsCount"); + } + if (slowLogPayload.getMultiMutationsCount() == 0) { + jsonObj.remove("multiMutationsCount"); + } + if (slowLogPayload.getMultiServiceCalls() == 0) { + jsonObj.remove("multiServiceCalls"); + } + return jsonObj; + }).create(); private final long startTime; private final int processingTime; @@ -239,9 +236,9 @@ public OnlineLogRecordBuilder setMultiServiceCalls(int multiServiceCalls) { } public OnlineLogRecord build() { - return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, - clientAddress, serverClass, methodName, callDetails, param, regionName, - userName, multiGetsCount, multiMutationsCount, multiServiceCalls); + return new OnlineLogRecord(startTime, processingTime, queueTime, responseSize, clientAddress, + serverClass, methodName, callDetails, param, regionName, userName, multiGetsCount, + multiMutationsCount, multiServiceCalls); } } @@ -257,42 +254,22 @@ public boolean equals(Object o) { OnlineLogRecord that = (OnlineLogRecord) o; - return new EqualsBuilder() - .append(startTime, that.startTime) - .append(processingTime, that.processingTime) - .append(queueTime, that.queueTime) - .append(responseSize, that.responseSize) - .append(multiGetsCount, that.multiGetsCount) - .append(multiMutationsCount, 
that.multiMutationsCount) - .append(multiServiceCalls, that.multiServiceCalls) - .append(clientAddress, that.clientAddress) - .append(serverClass, that.serverClass) - .append(methodName, that.methodName) - .append(callDetails, that.callDetails) - .append(param, that.param) - .append(regionName, that.regionName) - .append(userName, that.userName) - .isEquals(); + return new EqualsBuilder().append(startTime, that.startTime) + .append(processingTime, that.processingTime).append(queueTime, that.queueTime) + .append(responseSize, that.responseSize).append(multiGetsCount, that.multiGetsCount) + .append(multiMutationsCount, that.multiMutationsCount) + .append(multiServiceCalls, that.multiServiceCalls).append(clientAddress, that.clientAddress) + .append(serverClass, that.serverClass).append(methodName, that.methodName) + .append(callDetails, that.callDetails).append(param, that.param) + .append(regionName, that.regionName).append(userName, that.userName).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(17, 37) - .append(startTime) - .append(processingTime) - .append(queueTime) - .append(responseSize) - .append(clientAddress) - .append(serverClass) - .append(methodName) - .append(callDetails) - .append(param) - .append(regionName) - .append(userName) - .append(multiGetsCount) - .append(multiMutationsCount) - .append(multiServiceCalls) - .toHashCode(); + return new HashCodeBuilder(17, 37).append(startTime).append(processingTime).append(queueTime) + .append(responseSize).append(clientAddress).append(serverClass).append(methodName) + .append(callDetails).append(param).append(regionName).append(userName) + .append(multiGetsCount).append(multiMutationsCount).append(multiServiceCalls).toHashCode(); } @Override @@ -302,22 +279,14 @@ public String toJsonPrettyPrint() { @Override public String toString() { - return new ToStringBuilder(this) - .append("startTime", startTime) - .append("processingTime", processingTime) - .append("queueTime", queueTime) - .append("responseSize", responseSize) - .append("clientAddress", clientAddress) - .append("serverClass", serverClass) - .append("methodName", methodName) - .append("callDetails", callDetails) - .append("param", param) - .append("regionName", regionName) - .append("userName", userName) - .append("multiGetsCount", multiGetsCount) - .append("multiMutationsCount", multiMutationsCount) - .append("multiServiceCalls", multiServiceCalls) - .toString(); + return new ToStringBuilder(this).append("startTime", startTime) + .append("processingTime", processingTime).append("queueTime", queueTime) + .append("responseSize", responseSize).append("clientAddress", clientAddress) + .append("serverClass", serverClass).append("methodName", methodName) + .append("callDetails", callDetails).append("param", param).append("regionName", regionName) + .append("userName", userName).append("multiGetsCount", multiGetsCount) + .append("multiMutationsCount", multiMutationsCount) + .append("multiServiceCalls", multiServiceCalls).toString(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java index 3b6a6f5e51c4..5acd3ba282a4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Operation.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,14 +19,12 @@ import java.io.IOException; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.JsonMapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Superclass for any type that maps to a potentially application-level query. - * (e.g. Put, Get, Delete, Scan, Next, etc.) - * Contains methods for exposure to logging and debugging tools. + * Superclass for any type that maps to a potentially application-level query. (e.g. Put, Get, + * Delete, Scan, Next, etc.) Contains methods for exposure to logging and debugging tools. */ @InterfaceAudience.Public public abstract class Operation { @@ -36,15 +33,15 @@ public abstract class Operation { private static final int DEFAULT_MAX_COLS = 5; /** - * Produces a Map containing a fingerprint which identifies the type and - * the static schema components of a query (i.e. column families) + * Produces a Map containing a fingerprint which identifies the type and the static schema + * components of a query (i.e. column families) * @return a map containing fingerprint information (i.e. column families) */ public abstract Map getFingerprint(); /** - * Produces a Map containing a summary of the details of a query - * beyond the scope of the fingerprint (i.e. columns, rows...) + * Produces a Map containing a summary of the details of a query beyond the scope of the + * fingerprint (i.e. columns, rows...) * @param maxCols a limit on the number of columns output prior to truncation * @return a map containing parameters of a query (i.e. rows, columns...) */ @@ -59,8 +56,7 @@ public Map toMap() { } /** - * Produces a JSON object for fingerprint and details exposure in a - * parseable format. + * Produces a JSON object for fingerprint and details exposure in a parseable format. * @param maxCols a limit on the number of columns to include in the JSON * @return a JSONObject containing this Operation's information, as a string */ @@ -69,8 +65,7 @@ public String toJSON(int maxCols) throws IOException { } /** - * Produces a JSON object sufficient for description of a query - * in a debugging or logging context. + * Produces a JSON object sufficient for description of a query in a debugging or logging context. * @return the produced JSON object, as a string */ public String toJSON() throws IOException { @@ -78,17 +73,16 @@ public String toJSON() throws IOException { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. - * @param maxCols a limit on the number of columns output in the summary - * prior to truncation + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. + * @param maxCols a limit on the number of columns output in the summary prior to truncation * @return a JSON-parseable String */ public String toString(int maxCols) { - /* for now this is merely a wrapper from producing a JSON string, but - * toJSON is kept separate in case this is changed to be a less parsable - * pretty printed representation. + /* + * for now this is merely a wrapper from producing a JSON string, but toJSON is kept separate in + * case this is changed to be a less parsable pretty printed representation. 
*/ try { return toJSON(maxCols); @@ -98,9 +92,9 @@ public String toString(int maxCols) { } /** - * Produces a string representation of this Operation. It defaults to a JSON - * representation, but falls back to a string representation of the - * fingerprint and details in the case of a JSON encoding failure. + * Produces a string representation of this Operation. It defaults to a JSON representation, but + * falls back to a string representation of the fingerprint and details in the case of a JSON + * encoding failure. * @return String */ @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java index 7342e65bb316..d710533125b7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/OperationWithAttributes.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.util.Collections; @@ -39,18 +37,17 @@ public abstract class OperationWithAttributes extends Operation implements Attri private int priority = HConstants.PRIORITY_UNSET; /** - * empty construction. - * We need this empty construction to keep binary compatibility. + * empty construction. We need this empty construction to keep binary compatibility. */ protected OperationWithAttributes() { } protected OperationWithAttributes(OperationWithAttributes clone) { - this.attributes = clone.getAttributesMap() == null ? null : - clone.getAttributesMap().entrySet().stream() - .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> { - throw new RuntimeException("collisions!!!"); - }, () -> new TreeMap<>())); + this.attributes = clone.getAttributesMap() == null ? null + : clone.getAttributesMap().entrySet().stream() + .collect(Collectors.toMap(e -> e.getKey(), e -> e.getValue(), (k, v) -> { + throw new RuntimeException("collisions!!!"); + }, () -> new TreeMap<>())); this.priority = clone.getPriority(); } @@ -96,7 +93,7 @@ protected long getAttributeSize() { long size = 0; if (attributes != null) { size += ClassSize.align(this.attributes.size() * ClassSize.MAP_ENTRY); - for(Map.Entry entry : this.attributes.entrySet()) { + for (Map.Entry entry : this.attributes.entrySet()) { size += ClassSize.align(ClassSize.STRING + entry.getKey().length()); size += ClassSize.align(ClassSize.ARRAY + entry.getValue().length); } @@ -105,13 +102,11 @@ protected long getAttributeSize() { } /** - * This method allows you to set an identifier on an operation. The original - * motivation for this was to allow the identifier to be used in slow query - * logging, but this could obviously be useful in other places. One use of - * this could be to put a class.method identifier in here to see where the - * slow query is coming from. - * @param id - * id to set for the scan + * This method allows you to set an identifier on an operation. The original motivation for this + * was to allow the identifier to be used in slow query logging, but this could obviously be + * useful in other places. 
One use of this could be to put a class.method identifier in here to + * see where the slow query is coming from. + * @param id id to set for the scan */ public OperationWithAttributes setId(String id) { setAttribute(ID_ATRIBUTE, Bytes.toBytes(id)); @@ -119,13 +114,12 @@ public OperationWithAttributes setId(String id) { } /** - * This method allows you to retrieve the identifier for the operation if one - * was set. + * This method allows you to retrieve the identifier for the operation if one was set. * @return the id or null if not set */ public String getId() { byte[] attr = getAttribute(ID_ATRIBUTE); - return attr == null? null: Bytes.toString(attr); + return attr == null ? null : Bytes.toString(attr); } public OperationWithAttributes setPriority(int priority) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java index 1b1ded9953bb..56a8dd19fcc5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PackagePrivateFieldAccessor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java index 8aedc4d2205c..7d9d27907631 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,8 +35,8 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator { private PerClientRandomNonceGenerator() { byte[] clientIdBase = ClientIdGenerator.generateClientId(); - this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + - ThreadLocalRandom.current().nextInt(); + this.clientId = + (((long) Arrays.hashCode(clientIdBase)) << 32) + ThreadLocalRandom.current().nextInt(); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java index e35902ca78f0..1548110e8dc7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PreemptiveFastFailInterceptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.Map.Entry; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.commons.lang3.mutable.MutableBoolean; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; @@ -64,8 +63,7 @@ @InterfaceAudience.Private class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { - private static final Logger LOG = LoggerFactory - .getLogger(PreemptiveFastFailInterceptor.class); + private static final Logger LOG = LoggerFactory.getLogger(PreemptiveFastFailInterceptor.class); // amount of time to wait before we consider a server to be in fast fail // mode @@ -73,7 +71,8 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { // Keeps track of failures when we cannot talk to a server. Helps in // fast failing clients if the server is down for a long time. - protected final ConcurrentMap repeatedFailuresMap = new ConcurrentHashMap<>(); + protected final ConcurrentMap repeatedFailuresMap = + new ConcurrentHashMap<>(); // We populate repeatedFailuresMap every time there is a failure. So, to // keep it from growing unbounded, we garbage collect the failure information @@ -90,31 +89,26 @@ class PreemptiveFastFailInterceptor extends RetryingCallerInterceptor { private final ThreadLocal threadRetryingInFastFailMode = new ThreadLocal<>(); public PreemptiveFastFailInterceptor(Configuration conf) { - this.fastFailThresholdMilliSec = conf.getLong( - HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, - HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS_DEFAULT); - this.failureMapCleanupIntervalMilliSec = conf.getLong( - HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS, - HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS_DEFAULT); - this.fastFailClearingTimeMilliSec = conf.getLong( - HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_MS_DURATION_MS, - HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_DURATION_MS_DEFAULT); + this.fastFailThresholdMilliSec = conf.getLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, + HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS_DEFAULT); + this.failureMapCleanupIntervalMilliSec = + conf.getLong(HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS, + HConstants.HBASE_CLIENT_FAILURE_MAP_CLEANUP_INTERVAL_MS_DEFAULT); + this.fastFailClearingTimeMilliSec = + conf.getLong(HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_MS_DURATION_MS, + HConstants.HBASE_CLIENT_FAST_FAIL_CLEANUP_DURATION_MS_DEFAULT); lastFailureMapCleanupTimeMilliSec = EnvironmentEdgeManager.currentTime(); } - public void intercept(FastFailInterceptorContext context) - throws PreemptiveFastFailException { + public void intercept(FastFailInterceptorContext context) throws PreemptiveFastFailException { context.setFailureInfo(repeatedFailuresMap.get(context.getServer())); if (inFastFailMode(context.getServer()) && !currentThreadInFastFailMode()) { // In Fast-fail mode, all but one thread will fast fail. Check // if we are that one chosen thread. 
- context.setRetryDespiteFastFailMode(shouldRetryInspiteOfFastFail(context - .getFailureInfo())); + context.setRetryDespiteFastFailMode(shouldRetryInspiteOfFastFail(context.getFailureInfo())); if (!context.isRetryDespiteFastFailMode()) { // we don't have to retry - LOG.debug("Throwing PFFE : " + context.getFailureInfo() + " tries : " - + context.getTries()); - throw new PreemptiveFastFailException( - context.getFailureInfo().numConsecutiveFailures.get(), + LOG.debug("Throwing PFFE : " + context.getFailureInfo() + " tries : " + context.getTries()); + throw new PreemptiveFastFailException(context.getFailureInfo().numConsecutiveFailures.get(), context.getFailureInfo().timeOfFirstFailureMilliSec, context.getFailureInfo().timeOfLatestAttemptMilliSec, context.getServer(), context.getGuaranteedClientSideOnly().isTrue()); @@ -123,28 +117,23 @@ public void intercept(FastFailInterceptorContext context) context.setDidTry(true); } - public void handleFailure(FastFailInterceptorContext context, - Throwable t) throws IOException { - handleThrowable(t, context.getServer(), - context.getCouldNotCommunicateWithServer(), - context.getGuaranteedClientSideOnly()); + public void handleFailure(FastFailInterceptorContext context, Throwable t) throws IOException { + handleThrowable(t, context.getServer(), context.getCouldNotCommunicateWithServer(), + context.getGuaranteedClientSideOnly()); } public void updateFailureInfo(FastFailInterceptorContext context) { - updateFailureInfoForServer(context.getServer(), context.getFailureInfo(), - context.didTry(), context.getCouldNotCommunicateWithServer() - .booleanValue(), context.isRetryDespiteFastFailMode()); + updateFailureInfoForServer(context.getServer(), context.getFailureInfo(), context.didTry(), + context.getCouldNotCommunicateWithServer().booleanValue(), + context.isRetryDespiteFastFailMode()); } /** - * Handles failures encountered when communicating with a server. - * - * Updates the FailureInfo in repeatedFailuresMap to reflect the failure. - * Throws RepeatedConnectException if the client is in Fast fail mode. - * + * Handles failures encountered when communicating with a server. Updates the FailureInfo in + * repeatedFailuresMap to reflect the failure. Throws RepeatedConnectException if the client is in + * Fast fail mode. * @param serverName - * @param t - * - the throwable to be handled. + * @param t - the throwable to be handled. * @throws PreemptiveFastFailException */ protected void handleFailureToServer(ServerName serverName, Throwable t) { @@ -153,14 +142,14 @@ protected void handleFailureToServer(ServerName serverName, Throwable t) { } long currentTime = EnvironmentEdgeManager.currentTime(); FailureInfo fInfo = - computeIfAbsent(repeatedFailuresMap, serverName, () -> new FailureInfo(currentTime)); + computeIfAbsent(repeatedFailuresMap, serverName, () -> new FailureInfo(currentTime)); fInfo.timeOfLatestAttemptMilliSec = currentTime; fInfo.numConsecutiveFailures.incrementAndGet(); } public void handleThrowable(Throwable t1, ServerName serverName, - MutableBoolean couldNotCommunicateWithServer, - MutableBoolean guaranteedClientSideOnly) throws IOException { + MutableBoolean couldNotCommunicateWithServer, MutableBoolean guaranteedClientSideOnly) + throws IOException { Throwable t2 = ClientExceptionsUtil.translatePFFE(t1); boolean isLocalException = !(t2 instanceof RemoteException); @@ -172,38 +161,38 @@ public void handleThrowable(Throwable t1, ServerName serverName, } /** - * Occasionally cleans up unused information in repeatedFailuresMap. 
- * - * repeatedFailuresMap stores the failure information for all remote hosts - * that had failures. In order to avoid these from growing indefinitely, - * occassionallyCleanupFailureInformation() will clear these up once every + * Occasionally cleans up unused information in repeatedFailuresMap. repeatedFailuresMap stores + * the failure information for all remote hosts that had failures. In order to avoid these from + * growing indefinitely, occassionallyCleanupFailureInformation() will clear these up once every * cleanupInterval ms. */ protected void occasionallyCleanupFailureInformation() { long now = EnvironmentEdgeManager.currentTime(); - if (!(now > lastFailureMapCleanupTimeMilliSec - + failureMapCleanupIntervalMilliSec)) - return; + if (!(now > lastFailureMapCleanupTimeMilliSec + failureMapCleanupIntervalMilliSec)) return; // remove entries that haven't been attempted in a while // No synchronization needed. It is okay if multiple threads try to // remove the entry again and again from a concurrent hash map. StringBuilder sb = new StringBuilder(); for (Entry entry : repeatedFailuresMap.entrySet()) { - if (now > entry.getValue().timeOfLatestAttemptMilliSec - + failureMapCleanupIntervalMilliSec) { // no recent failures + if (now > entry.getValue().timeOfLatestAttemptMilliSec + failureMapCleanupIntervalMilliSec) { // no + // recent + // failures repeatedFailuresMap.remove(entry.getKey()); - } else if (now > entry.getValue().timeOfFirstFailureMilliSec - + this.fastFailClearingTimeMilliSec) { // been failing for a long - // time - LOG.error(entry.getKey() - + " been failing for a long time. clearing out." - + entry.getValue().toString()); - repeatedFailuresMap.remove(entry.getKey()); - } else { - sb.append(entry.getKey().toString()).append(" failing ") - .append(entry.getValue().toString()).append("\n"); - } + } else + if (now > entry.getValue().timeOfFirstFailureMilliSec + this.fastFailClearingTimeMilliSec) { // been + // failing + // for + // a + // long + // time + LOG.error(entry.getKey() + " been failing for a long time. clearing out." + + entry.getValue().toString()); + repeatedFailuresMap.remove(entry.getKey()); + } else { + sb.append(entry.getKey().toString()).append(" failing ") + .append(entry.getValue().toString()).append("\n"); + } } if (sb.length() > 0) { LOG.warn("Preemptive failure enabled for : " + sb.toString()); @@ -212,11 +201,9 @@ protected void occasionallyCleanupFailureInformation() { } /** - * Checks to see if we are in the Fast fail mode for requests to the server. - * - * If a client is unable to contact a server for more than - * fastFailThresholdMilliSec the client will get into fast fail mode. - * + * Checks to see if we are in the Fast fail mode for requests to the server. If a client is unable + * to contact a server for more than fastFailThresholdMilliSec the client will get into fast fail + * mode. * @param server * @return true if the client is in fast fail mode for the server. */ @@ -225,30 +212,23 @@ private boolean inFastFailMode(ServerName server) { // if fInfo is null --> The server is considered good. // If the server is bad, wait long enough to believe that the server is // down. 
- return (fInfo != null && - EnvironmentEdgeManager.currentTime() > - (fInfo.timeOfFirstFailureMilliSec + this.fastFailThresholdMilliSec)); + return (fInfo != null && EnvironmentEdgeManager + .currentTime() > (fInfo.timeOfFirstFailureMilliSec + this.fastFailThresholdMilliSec)); } /** - * Checks to see if the current thread is already in FastFail mode for *some* - * server. - * + * Checks to see if the current thread is already in FastFail mode for *some* server. * @return true, if the thread is already in FF mode. */ private boolean currentThreadInFastFailMode() { - return (this.threadRetryingInFastFailMode.get() != null && (this.threadRetryingInFastFailMode - .get().booleanValue() == true)); + return (this.threadRetryingInFastFailMode.get() != null + && (this.threadRetryingInFastFailMode.get().booleanValue() == true)); } /** - * Check to see if the client should try to connnect to the server, inspite of - * knowing that it is in the fast fail mode. - * - * The idea here is that we want just one client thread to be actively trying - * to reconnect, while all the other threads trying to reach the server will - * short circuit. - * + * Check to see if the client should try to connnect to the server, inspite of knowing that it is + * in the fast fail mode. The idea here is that we want just one client thread to be actively + * trying to reconnect, while all the other threads trying to reach the server will short circuit. * @param fInfo * @return true if the client should try to connect to the server. */ @@ -257,10 +237,8 @@ protected boolean shouldRetryInspiteOfFastFail(FailureInfo fInfo) { // client // actively trying to connect. If we are the chosen one, we will retry // and not throw an exception. - if (fInfo != null - && fInfo.exclusivelyRetringInspiteOfFastFail.compareAndSet(false, true)) { - MutableBoolean threadAlreadyInFF = this.threadRetryingInFastFailMode - .get(); + if (fInfo != null && fInfo.exclusivelyRetringInspiteOfFastFail.compareAndSet(false, true)) { + MutableBoolean threadAlreadyInFF = this.threadRetryingInFastFailMode.get(); if (threadAlreadyInFF == null) { threadAlreadyInFF = new MutableBoolean(); this.threadRetryingInFastFailMode.set(threadAlreadyInFF); @@ -273,20 +251,15 @@ protected boolean shouldRetryInspiteOfFastFail(FailureInfo fInfo) { } /** - * - * This function updates the Failure info for a particular server after the - * attempt to - * + * This function updates the Failure info for a particular server after the attempt to * @param server * @param fInfo * @param couldNotCommunicate * @param retryDespiteFastFailMode */ - private void updateFailureInfoForServer(ServerName server, - FailureInfo fInfo, boolean didTry, boolean couldNotCommunicate, - boolean retryDespiteFastFailMode) { - if (server == null || fInfo == null || didTry == false) - return; + private void updateFailureInfoForServer(ServerName server, FailureInfo fInfo, boolean didTry, + boolean couldNotCommunicate, boolean retryDespiteFastFailMode) { + if (server == null || fInfo == null || didTry == false) return; // If we were able to connect to the server, reset the failure // information. 
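The fast-fail behaviour documented above reduces to a time-window check plus a one-probe election: once a server has been failing for longer than the configured threshold, a single thread keeps retrying while every other caller fails fast. A stripped-down model of that gate, assuming nothing from HBase internals (the class and field names below are illustrative only, not the interceptor's actual code):

import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative model of the fast-fail gate described above.
final class FastFailGateSketch {
  private final long fastFailThresholdMillis;
  private volatile long timeOfFirstFailureMillis = -1;
  private final AtomicBoolean exclusivelyRetrying = new AtomicBoolean(false);

  FastFailGateSketch(long fastFailThresholdMillis) {
    this.fastFailThresholdMillis = fastFailThresholdMillis;
  }

  void recordFailure(long nowMillis) {
    if (timeOfFirstFailureMillis < 0) {
      timeOfFirstFailureMillis = nowMillis;
    }
  }

  /** Returns true if this caller may attempt the server; false means "fail fast". */
  boolean mayAttempt(long nowMillis) {
    boolean inFastFailMode = timeOfFirstFailureMillis >= 0
        && nowMillis > timeOfFirstFailureMillis + fastFailThresholdMillis;
    if (!inFastFailMode) {
      return true;
    }
    // Exactly one thread keeps probing the troubled server; the rest short circuit.
    return exclusivelyRetrying.compareAndSet(false, true);
  }
}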
@@ -317,8 +290,8 @@ public void intercept(RetryingCallerInterceptorContext context) } @Override - public void handleFailure(RetryingCallerInterceptorContext context, - Throwable t) throws IOException { + public void handleFailure(RetryingCallerInterceptorContext context, Throwable t) + throws IOException { if (context instanceof FastFailInterceptorContext) { handleFailure((FastFailInterceptorContext) context, t); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java index 702717038c30..30d369e9fa9a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Put.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; import java.io.IOException; @@ -38,10 +36,9 @@ /** * Used to perform Put operations for a single row. *

    - * To perform a Put, instantiate a Put object with the row to insert to, and - * for each column to be inserted, execute {@link #addColumn(byte[], byte[], - * byte[]) add} or {@link #addColumn(byte[], byte[], long, byte[]) add} if - * setting the timestamp. + * To perform a Put, instantiate a Put object with the row to insert to, and for each column to be + * inserted, execute {@link #addColumn(byte[], byte[], byte[]) add} or + * {@link #addColumn(byte[], byte[], long, byte[]) add} if setting the timestamp. */ @InterfaceAudience.Public public class Put extends Mutation implements HeapSize { @@ -49,13 +46,12 @@ public class Put extends Mutation implements HeapSize { * Create a Put operation for the specified row. * @param row row key */ - public Put(byte [] row) { + public Put(byte[] row) { this(row, HConstants.LATEST_TIMESTAMP); } /** * Create a Put operation for the specified row, using a given timestamp. - * * @param row row key; we make a copy of what we are passed to keep local. * @param ts timestamp */ @@ -69,13 +65,13 @@ public Put(byte[] row, long ts) { * @param rowOffset * @param rowLength */ - public Put(byte [] rowArray, int rowOffset, int rowLength) { + public Put(byte[] rowArray, int rowOffset, int rowLength) { this(rowArray, rowOffset, rowLength, HConstants.LATEST_TIMESTAMP); } /** * @param row row key; we make a copy of what we are passed to keep local. - * @param ts timestamp + * @param ts timestamp */ public Put(ByteBuffer row, long ts) { if (ts < 0) { @@ -101,7 +97,7 @@ public Put(ByteBuffer row) { * @param rowLength * @param ts */ - public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { + public Put(byte[] rowArray, int rowOffset, int rowLength, long ts) { checkRow(rowArray, rowOffset, rowLength); this.row = Bytes.copy(rowArray, rowOffset, rowLength); this.ts = ts; @@ -112,24 +108,20 @@ public Put(byte [] rowArray, int rowOffset, int rowLength, long ts) { /** * Create a Put operation for an immutable row key. - * * @param row row key - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. */ - public Put(byte [] row, boolean rowIsImmutable) { + public Put(byte[] row, boolean rowIsImmutable) { this(row, HConstants.LATEST_TIMESTAMP, rowIsImmutable); } /** * Create a Put operation for an immutable row key, using a given timestamp. - * * @param row row key * @param ts timestamp - * @param rowIsImmutable whether the input row is immutable. - * Set to true if the caller can guarantee that - * the row will not be changed for the Put duration. + * @param rowIsImmutable whether the input row is immutable. Set to true if the caller can + * guarantee that the row will not be changed for the Put duration. 
*/ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Check and set timestamp @@ -140,15 +132,15 @@ public Put(byte[] row, long ts, boolean rowIsImmutable) { // Deal with row according to rowIsImmutable checkRow(row); - if (rowIsImmutable) { // Row is immutable - this.row = row; // Do not make a local copy, but point to the provided byte array directly - } else { // Row is not immutable - this.row = Bytes.copy(row, 0, row.length); // Make a local copy + if (rowIsImmutable) { // Row is immutable + this.row = row; // Do not make a local copy, but point to the provided byte array directly + } else { // Row is not immutable + this.row = Bytes.copy(row, 0, row.length); // Make a local copy } } /** - * Copy constructor. Creates a Put operation cloned from the specified Put. + * Copy constructor. Creates a Put operation cloned from the specified Put. * @param putToCopy put to copy */ public Put(Put putToCopy) { @@ -156,14 +148,13 @@ public Put(Put putToCopy) { } /** - * Construct the Put with user defined data. NOTED: - * 1) all cells in the familyMap must have the Type.Put - * 2) the row of each cell must be same with passed row. + * Construct the Put with user defined data. NOTED: 1) all cells in the familyMap must have the + * Type.Put 2) the row of each cell must be same with passed row. * @param row row. CAN'T be null * @param ts timestamp * @param familyMap the map to collect all cells internally. CAN'T be null */ - public Put(byte[] row, long ts, NavigableMap> familyMap) { + public Put(byte[] row, long ts, NavigableMap> familyMap) { super(row, ts, familyMap); } @@ -174,32 +165,31 @@ public Put(byte[] row, long ts, NavigableMap> familyMap) { * @param value column value * @return this */ - public Put addColumn(byte [] family, byte [] qualifier, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, byte[] value) { return addColumn(family, qualifier, this.ts, value); } /** - * See {@link #addColumn(byte[], byte[], byte[])}. This version expects - * that the underlying arrays won't change. It's intended - * for usage internal HBase to and for advanced client applications. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #add(Cell)} and {@link org.apache.hadoop.hbase.CellBuilder} instead + * See {@link #addColumn(byte[], byte[], byte[])}. This version expects that the underlying arrays + * won't change. It's intended for usage internal HBase to and for advanced client applications. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)} + * and {@link org.apache.hadoop.hbase.CellBuilder} instead */ @Deprecated - public Put addImmutable(byte [] family, byte [] qualifier, byte [] value) { + public Put addImmutable(byte[] family, byte[] qualifier, byte[] value) { return addImmutable(family, qualifier, this.ts, value); } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. * @param family family name * @param qualifier column qualifier * @param ts version timestamp * @param value column value * @return this */ - public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) { + public Put addColumn(byte[] family, byte[] qualifier, long ts, byte[] value) { if (ts < 0) { throw new IllegalArgumentException("Timestamp cannot be negative. 
ts=" + ts); } @@ -210,15 +200,16 @@ public Put addColumn(byte [] family, byte [] qualifier, long ts, byte [] value) } /** - * See {@link #addColumn(byte[], byte[], long, byte[])}. This version expects - * that the underlying arrays won't change. It's intended - * for usage internal HBase to and for advanced client applications. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #add(Cell)} and {@link org.apache.hadoop.hbase.CellBuilder} instead + * See {@link #addColumn(byte[], byte[], long, byte[])}. This version expects that the underlying + * arrays won't change. It's intended for usage internal HBase to and for advanced client + * applications. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)} + * and {@link org.apache.hadoop.hbase.CellBuilder} instead */ @Deprecated - public Put addImmutable(byte [] family, byte [] qualifier, long ts, byte [] value) { - // Family can not be null, otherwise NullPointerException is thrown when putting the cell into familyMap + public Put addImmutable(byte[] family, byte[] qualifier, long ts, byte[] value) { + // Family can not be null, otherwise NullPointerException is thrown when putting the cell into + // familyMap if (family == null) { throw new IllegalArgumentException("Family cannot be null"); } @@ -229,13 +220,14 @@ public Put addImmutable(byte [] family, byte [] qualifier, long ts, byte [] valu } List list = getCellList(family); - list.add(new IndividualBytesFieldCell(this.row, family, qualifier, ts, KeyValue.Type.Put, value)); + list.add( + new IndividualBytesFieldCell(this.row, family, qualifier, ts, KeyValue.Type.Put, value)); return this; } /** - * Add the specified column and value, with the specified timestamp as - * its version to this Put operation. + * Add the specified column and value, with the specified timestamp as its version to this Put + * operation. * @param family family name * @param qualifier column qualifier * @param ts version timestamp @@ -253,11 +245,11 @@ public Put addColumn(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer va } /** - * See {@link #addColumn(byte[], ByteBuffer, long, ByteBuffer)}. This version expects - * that the underlying arrays won't change. It's intended - * for usage internal HBase to and for advanced client applications. - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link #add(Cell)} and {@link org.apache.hadoop.hbase.CellBuilder} instead + * See {@link #addColumn(byte[], ByteBuffer, long, ByteBuffer)}. This version expects that the + * underlying arrays won't change. It's intended for usage internal HBase to and for advanced + * client applications. + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use {@link #add(Cell)} + * and {@link org.apache.hadoop.hbase.CellBuilder} instead */ @Deprecated public Put addImmutable(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer value) { @@ -271,9 +263,8 @@ public Put addImmutable(byte[] family, ByteBuffer qualifier, long ts, ByteBuffer } /** - * Add the specified KeyValue to this Put operation. Operation assumes that - * the passed KeyValue is immutable and its backing array will not be modified - * for the duration of this Put. + * Add the specified KeyValue to this Put operation. Operation assumes that the passed KeyValue is + * immutable and its backing array will not be modified for the duration of this Put. 
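As the Put class Javadoc above describes, a write is built by instantiating a Put for the target row and calling addColumn once per column, with or without an explicit timestamp. A minimal sketch, assuming an already-open Connection; the table, row and column names are placeholders:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  // 'connection' must be an already-open HBase Connection; names below are made up.
  static void writeRow(Connection connection) throws IOException {
    try (Table table = connection.getTable(TableName.valueOf("my_table"))) {
      Put put = new Put(Bytes.toBytes("row-1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("name"), Bytes.toBytes("alice"));
      // addColumn with an explicit timestamp, as mentioned in the class Javadoc
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("age"), 1234567890L, Bytes.toBytes("30"));
      table.put(put);
    }
  }
}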
* @param cell individual cell * @return this * @throws java.io.IOException e @@ -306,8 +297,8 @@ public Put setDurability(Durability d) { /** * Method for setting the put's familyMap - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. - * Use {@link Put#Put(byte[], long, NavigableMap)} instead + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0. Use + * {@link Put#Put(byte[], long, NavigableMap)} instead */ @Deprecated @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java index 1d990d1bc942..af17f81762aa 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,6 @@ package org.apache.hadoop.hbase.client; import java.util.Map; - -import org.apache.hbase.thirdparty.com.google.common.collect.Maps; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.TimeRange; @@ -29,11 +26,14 @@ import org.apache.hadoop.hbase.security.access.Permission; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.VisibilityConstants; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; -import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hbase.thirdparty.com.google.common.collect.Maps; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** * Base class for HBase read operations; e.g. Scan and Get. @@ -46,6 +46,7 @@ public abstract class Query extends OperationWithAttributes { protected Consistency consistency = Consistency.STRONG; protected Map colFamTimeRangeMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR); protected Boolean loadColumnFamiliesOnDemand = null; + /** * @return Filter */ @@ -70,8 +71,8 @@ public Query setFilter(Filter filter) { * @param authorizations */ public Query setAuthorizations(Authorizations authorizations) { - this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, ProtobufUtil - .toAuthorizations(authorizations).toByteArray()); + this.setAttribute(VisibilityConstants.VISIBILITY_LABELS_ATTR_KEY, + ProtobufUtil.toAuthorizations(authorizations).toByteArray()); return this; } @@ -111,7 +112,7 @@ public Query setACL(Map perms) { permMap.put(entry.getKey(), entry.getValue()); } setAttribute(AccessControlConstants.OP_ATTRIBUTE_ACL, - AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); + AccessControlUtil.toUsersAndPermissions(permMap).toByteArray()); return this; } @@ -134,9 +135,9 @@ public Query setConsistency(Consistency consistency) { /** * Specify region replica id where Query will fetch data from. Use this together with - * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from - * a specific replicaId. - *
    Expert: This is an advanced API exposed. Only use it if you know what you are doing + * {@link #setConsistency(Consistency)} passing {@link Consistency#TIMELINE} to read data from a + * specific replicaId.
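For the setAuthorizations/setACL setters reformatted in the Query hunks above, a short sketch, assuming the visibility and access-control coprocessors are enabled on the cluster; labels and the user name are placeholders.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.visibility.Authorizations;

public class QueryAclSketch {
  static Scan secureScan() {
    Scan scan = new Scan();
    // Visibility labels presented for this read.
    scan.setAuthorizations(new Authorizations("secret", "topsecret"));
    // Attaches the OP_ATTRIBUTE_ACL attribute consumed by the AccessController.
    scan.setACL("bob", new Permission(Permission.Action.READ));
    return scan;
  }
}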
    + * Expert: This is an advanced API exposed. Only use it if you know what you are doing * @param Id */ public Query setReplicaId(int Id) { @@ -153,14 +154,10 @@ public int getReplicaId() { } /** - * Set the isolation level for this query. If the - * isolation level is set to READ_UNCOMMITTED, then - * this query will return data from committed and - * uncommitted transactions. If the isolation level - * is set to READ_COMMITTED, then this query will return - * data from committed transactions only. If a isolation - * level is not explicitly set on a Query, then it - * is assumed to be READ_COMMITTED. + * Set the isolation level for this query. If the isolation level is set to READ_UNCOMMITTED, then + * this query will return data from committed and uncommitted transactions. If the isolation level + * is set to READ_COMMITTED, then this query will return data from committed transactions only. If + * a isolation level is not explicitly set on a Query, then it is assumed to be READ_COMMITTED. * @param level IsolationLevel for this query */ public Query setIsolationLevel(IsolationLevel level) { @@ -169,32 +166,28 @@ public Query setIsolationLevel(IsolationLevel level) { } /** - * @return The isolation level of this query. - * If no isolation level was set for this query object, - * then it returns READ_COMMITTED. + * @return The isolation level of this query. If no isolation level was set for this query object, + * then it returns READ_COMMITTED. * @return The IsolationLevel for this query */ public IsolationLevel getIsolationLevel() { byte[] attr = getAttribute(ISOLATION_LEVEL); - return attr == null ? IsolationLevel.READ_COMMITTED : - IsolationLevel.fromBytes(attr); - } - - /** - * Set the value indicating whether loading CFs on demand should be allowed (cluster - * default is false). On-demand CF loading doesn't load column families until necessary, e.g. - * if you filter on one column, the other column family data will be loaded only for the rows - * that are included in result, not all rows like in normal case. - * With column-specific filters, like SingleColumnValueFilter w/filterIfMissing == true, - * this can deliver huge perf gains when there's a cf with lots of data; however, it can - * also lead to some inconsistent results, as follows: - * - if someone does a concurrent update to both column families in question you may get a row - * that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" } } - * someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent scan - * filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, - * { video => "my dog" } }. - * - if there's a concurrent split and you have more than 2 column families, some rows may be - * missing some column families. + return attr == null ? IsolationLevel.READ_COMMITTED : IsolationLevel.fromBytes(attr); + } + + /** + * Set the value indicating whether loading CFs on demand should be allowed (cluster default is + * false). On-demand CF loading doesn't load column families until necessary, e.g. if you filter + * on one column, the other column family data will be loaded only for the rows that are included + * in result, not all rows like in normal case. 
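A sketch of the read-replica and isolation-level settings documented above; it assumes region replication is enabled for the table, and the replica id is illustrative only.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Consistency;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.IsolationLevel;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TimelineReadSketch {
  static Result timelineGet(Table table) throws IOException {
    Get get = new Get(Bytes.toBytes("row-1"));
    // TIMELINE consistency allows possibly-stale reads served by secondary replicas.
    get.setConsistency(Consistency.TIMELINE);
    // Expert setting: pin the read to a specific replica id (illustrative value).
    get.setReplicaId(1);
    // Without an explicit level, READ_COMMITTED is assumed.
    get.setIsolationLevel(IsolationLevel.READ_COMMITTED);
    Result result = table.get(get);
    // result.isStale() reports whether the data came from a secondary replica.
    return result;
  }
}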
With column-specific filters, like + * SingleColumnValueFilter w/filterIfMissing == true, this can deliver huge perf gains when + * there's a cf with lots of data; however, it can also lead to some inconsistent results, as + * follows: - if someone does a concurrent update to both column families in question you may get + * a row that never existed, e.g. for { rowKey = 5, { cat_videos => 1 }, { video => "my cat" + * } } someone puts rowKey 5 with { cat_videos => 0 }, { video => "my dog" }, concurrent + * scan filtering on "cat_videos == 1" can get { rowKey = 5, { cat_videos => 1 }, { video => + * "my dog" } }. - if there's a concurrent split and you have more than 2 column families, some + * rows may be missing some column families. */ public Query setLoadColumnFamiliesOnDemand(boolean value) { this.loadColumnFamiliesOnDemand = value; @@ -212,18 +205,15 @@ public Boolean getLoadColumnFamiliesOnDemandValue() { * Get the logical value indicating whether on-demand CF loading should be allowed. */ public boolean doLoadColumnFamiliesOnDemand() { - return (this.loadColumnFamiliesOnDemand != null) - && this.loadColumnFamiliesOnDemand; + return (this.loadColumnFamiliesOnDemand != null) && this.loadColumnFamiliesOnDemand; } /** - * Get versions of columns only within the specified timestamp range, - * [minStamp, maxStamp) on a per CF bases. Note, default maximum versions to return is 1. If - * your time range spans more than one version and you want all versions - * returned, up the number of versions beyond the default. + * Get versions of columns only within the specified timestamp range, [minStamp, maxStamp) on a + * per CF bases. Note, default maximum versions to return is 1. If your time range spans more than + * one version and you want all versions returned, up the number of versions beyond the default. * Column Family time ranges take precedence over the global time range. 
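A sketch of the on-demand column-family loading and per-family time range described above; the SingleColumnValueFilter with filterIfMissing is the scenario the javadoc cites for the performance gain, and the family/qualifier names are placeholders.

import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.util.Bytes;

public class OnDemandCfSketch {
  static Scan buildScan(long minStamp, long maxStamp) {
    byte[] indexCf = Bytes.toBytes("cat_videos"); // small, filtered family (placeholder)
    byte[] dataCf = Bytes.toBytes("video");       // large family loaded on demand (placeholder)

    SingleColumnValueFilter filter = new SingleColumnValueFilter(
        indexCf, Bytes.toBytes("flag"), CompareOperator.EQUAL, Bytes.toBytes("1"));
    // Rows missing the filtered column are skipped entirely.
    filter.setFilterIfMissing(true);

    Scan scan = new Scan();
    scan.addFamily(indexCf);
    scan.addFamily(dataCf);
    scan.setFilter(filter);
    // Only load the big family for rows that pass the filter (cluster default is false).
    scan.setLoadColumnFamiliesOnDemand(true);
    // [minStamp, maxStamp) applies to dataCf only and overrides the global time range.
    scan.setColumnFamilyTimeRange(dataCf, minStamp, maxStamp);
    return scan;
  }
}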
- * - * @param cf the column family for which you want to restrict + * @param cf the column family for which you want to restrict * @param minStamp minimum timestamp value, inclusive * @param maxStamp maximum timestamp value, exclusive * @return this diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java index 29698c201336..219441e6bb04 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RawAsyncHBaseAdmin.java @@ -204,8 +204,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsProcedureDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsRpcThrottleEnabledResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotDoneResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; @@ -260,8 +259,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetQuotaResponse; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .SetSnapshotCleanupResponse; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ShutdownRequest; @@ -351,8 +349,8 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { this.pauseNs = builder.pauseNs; if (builder.pauseForCQTBENs < builder.pauseNs) { LOG.warn( - "Configured value of pauseForCQTBENs is {} ms, which is less than" + - " the normal pause value {} ms, use the greater one instead", + "Configured value of pauseForCQTBENs is {} ms, which is less than" + + " the normal pause value {} ms, use the greater one instead", TimeUnit.NANOSECONDS.toMillis(builder.pauseForCQTBENs), TimeUnit.NANOSECONDS.toMillis(builder.pauseNs)); this.pauseForCQTBENs = builder.pauseNs; @@ -366,18 +364,18 @@ class RawAsyncHBaseAdmin implements AsyncAdmin { private MasterRequestCallerBuilder newMasterCaller() { return this.connection.callerFactory. 
masterRequest() - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) - .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) + .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); } private AdminRequestCallerBuilder newAdminCaller() { return this.connection.callerFactory. adminRequest() - .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) - .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) - .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) - .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); + .rpcTimeout(rpcTimeoutNs, TimeUnit.NANOSECONDS) + .operationTimeout(operationTimeoutNs, TimeUnit.NANOSECONDS) + .pause(pauseNs, TimeUnit.NANOSECONDS).pauseForCQTBE(pauseForCQTBENs, TimeUnit.NANOSECONDS) + .maxAttempts(maxAttempts).startLogErrorsCnt(startLogErrorsCnt); } @FunctionalInterface @@ -482,8 +480,8 @@ public CompletableFuture tableExists(TableName tableName) { @Override public CompletableFuture> listTableDescriptors(boolean includeSysTables) { - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(null, - includeSysTables)); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(null, includeSysTables)); } /** @@ -494,8 +492,8 @@ public CompletableFuture> listTableDescriptors(Pattern pat boolean includeSysTables) { Preconditions.checkNotNull(pattern, "pattern is null. If you don't specify a pattern, use listTables(boolean) instead"); - return getTableDescriptors(RequestConverter.buildGetTableDescriptorsRequest(pattern, - includeSysTables)); + return getTableDescriptors( + RequestConverter.buildGetTableDescriptorsRequest(pattern, includeSysTables)); } @Override @@ -524,28 +522,26 @@ public CompletableFuture> listTableNames(boolean includeSysTable } @Override - public CompletableFuture> - listTableNames(Pattern pattern, boolean includeSysTables) { + public CompletableFuture> listTableNames(Pattern pattern, + boolean includeSysTables) { Preconditions.checkNotNull(pattern, - "pattern is null. If you don't specify a pattern, use listTableNames(boolean) instead"); + "pattern is null. 
If you don't specify a pattern, use listTableNames(boolean) instead"); return getTableNames(RequestConverter.buildGetTableNamesRequest(pattern, includeSysTables)); } private CompletableFuture> getTableNames(GetTableNamesRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call(controller, - stub, request, (s, c, req, done) -> s.getTableNames(c, req, done), - (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))).call(); + return this.> newMasterCaller() + .action((controller, stub) -> this + .> call(controller, stub, + request, (s, c, req, done) -> s.getTableNames(c, req, done), + (resp) -> ProtobufUtil.toTableNameList(resp.getTableNamesList()))) + .call(); } @Override public CompletableFuture> listTableDescriptorsByNamespace(String name) { return this.> newMasterCaller().action((controller, stub) -> this - .> call( + .> call( controller, stub, ListTableDescriptorsByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableDescriptorsByNamespace(c, req, done), @@ -556,8 +552,7 @@ List> call( @Override public CompletableFuture> listTableNamesByNamespace(String name) { return this.> newMasterCaller().action((controller, stub) -> this - .> call( + .> call( controller, stub, ListTableNamesByNamespaceRequest.newBuilder().setNamespaceName(name).build(), (s, c, req, done) -> s.listTableNamesByNamespace(c, req, done), @@ -569,12 +564,13 @@ List> call( public CompletableFuture getDescriptor(TableName tableName) { CompletableFuture future = new CompletableFuture<>(); addListener(this.> newMasterCaller().priority(tableName) - .action((controller, stub) -> this - .> call( - controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), - (s, c, req, done) -> s.getTableDescriptors(c, req, done), - (resp) -> resp.getTableSchemaList())) - .call(), (tableSchemas, error) -> { + .action((controller, stub) -> this + .> call( + controller, stub, RequestConverter.buildGetTableDescriptorsRequest(tableName), + (s, c, req, done) -> s.getTableDescriptors(c, req, done), + (resp) -> resp.getTableSchemaList())) + .call(), + (tableSchemas, error) -> { if (error != null) { future.completeExceptionally(error); return; @@ -628,20 +624,21 @@ private CompletableFuture createTable(TableName tableName, CreateTableRequ public CompletableFuture modifyTable(TableDescriptor desc) { return this. procedureCall(desc.getTableName(), RequestConverter.buildModifyTableRequest(desc.getTableName(), desc, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyTable(c, req, done), - (resp) -> resp.getProcId(), new ModifyTableProcedureBiConsumer(this, desc.getTableName())); + ng.newNonce()), + (s, c, req, done) -> s.modifyTable(c, req, done), (resp) -> resp.getProcId(), + new ModifyTableProcedureBiConsumer(this, desc.getTableName())); } @Override public CompletableFuture modifyTableStoreFileTracker(TableName tableName, String dstSFT) { return this - . procedureCall( - tableName, - RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, - ng.getNonceGroup(), ng.newNonce()), - (s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done), - (resp) -> resp.getProcId(), - new ModifyTableStoreFileTrackerProcedureBiConsumer(this, tableName)); + . 
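The table-listing callers reindented above are reached through the public AsyncAdmin interface; a minimal sketch, assuming an already-created AsyncConnection and an illustrative name pattern.

import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.regex.Pattern;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;

public class ListTablesSketch {
  static CompletableFuture<List<TableName>> userTables(AsyncConnection conn) {
    AsyncAdmin admin = conn.getAdmin();
    // The pattern must be non-null here; use listTableNames(boolean) when no pattern is wanted.
    return admin.listTableNames(Pattern.compile("test_.*"), false);
  }
}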
procedureCall( + tableName, + RequestConverter.buildModifyTableStoreFileTrackerRequest(tableName, dstSFT, + ng.getNonceGroup(), ng.newNonce()), + (s, c, req, done) -> s.modifyTableStoreFileTracker(c, req, done), + (resp) -> resp.getProcId(), + new ModifyTableStoreFileTrackerProcedureBiConsumer(this, tableName)); } @Override @@ -656,8 +653,9 @@ public CompletableFuture deleteTable(TableName tableName) { public CompletableFuture truncateTable(TableName tableName, boolean preserveSplits) { return this. procedureCall(tableName, RequestConverter.buildTruncateTableRequest(tableName, preserveSplits, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.truncateTable(c, req, done), - (resp) -> resp.getProcId(), new TruncateTableProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.truncateTable(c, req, done), (resp) -> resp.getProcId(), + new TruncateTableProcedureBiConsumer(tableName)); } @Override @@ -677,9 +675,9 @@ public CompletableFuture disableTable(TableName tableName) { } /** - * Utility for completing passed TableState {@link CompletableFuture} future - * using passed parameters. Sets error or boolean result ('true' if table matches - * the passed-in targetState). + * Utility for completing passed TableState {@link CompletableFuture} future using + * passed parameters. Sets error or boolean result ('true' if table matches the passed-in + * targetState). */ private static CompletableFuture completeCheckTableState( CompletableFuture future, TableState tableState, Throwable error, @@ -703,7 +701,7 @@ public CompletableFuture isTableEnabled(TableName tableName) { } CompletableFuture future = new CompletableFuture<>(); addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> { - completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error, + completeCheckTableState(future, tableState.isPresent() ? tableState.get() : null, error, TableState.State.ENABLED, tableName); }); return future; @@ -716,7 +714,7 @@ public CompletableFuture isTableDisabled(TableName tableName) { } CompletableFuture future = new CompletableFuture<>(); addListener(AsyncMetaTableAccessor.getTableState(metaTable, tableName), (tableState, error) -> { - completeCheckTableState(future, tableState.isPresent()? tableState.get(): null, error, + completeCheckTableState(future, tableState.isPresent() ? 
tableState.get() : null, error, TableState.State.DISABLED, tableName); }); return future; @@ -737,8 +735,9 @@ public CompletableFuture isTableAvailable(TableName tableName, byte[][] private CompletableFuture isTableAvailable(TableName tableName, Optional splitKeys) { if (TableName.isMetaTableName(tableName)) { - return connection.registry.getMetaRegionLocations().thenApply(locs -> Stream - .of(locs.getRegionLocations()).allMatch(loc -> loc != null && loc.getServerName() != null)); + return connection.registry.getMetaRegionLocations() + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .allMatch(loc -> loc != null && loc.getServerName() != null)); } CompletableFuture future = new CompletableFuture<>(); addListener(isTableEnabled(tableName), (enabled, error) -> { @@ -753,15 +752,14 @@ private CompletableFuture isTableAvailable(TableName tableName, if (!enabled) { future.complete(false); } else { - addListener( - AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), + addListener(AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName), (locations, error1) -> { if (error1 != null) { future.completeExceptionally(error1); return; } List notDeployedRegions = locations.stream() - .filter(loc -> loc.getServerName() == null).collect(Collectors.toList()); + .filter(loc -> loc.getServerName() == null).collect(Collectors.toList()); if (notDeployedRegions.size() > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Table " + tableName + " has " + notDeployedRegions.size() + " regions"); @@ -771,7 +769,7 @@ private CompletableFuture isTableAvailable(TableName tableName, } Optional available = - splitKeys.map(keys -> compareRegionsWithSplitKeys(locations, keys)); + splitKeys.map(keys -> compareRegionsWithSplitKeys(locations, keys)); future.complete(available.orElse(true)); }); } @@ -799,11 +797,12 @@ private boolean compareRegionsWithSplitKeys(List locations, byt } @Override - public CompletableFuture addColumnFamily( - TableName tableName, ColumnFamilyDescriptor columnFamily) { + public CompletableFuture addColumnFamily(TableName tableName, + ColumnFamilyDescriptor columnFamily) { return this. procedureCall(tableName, RequestConverter.buildAddColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), + ng.newNonce()), + (s, c, req, done) -> s.addColumn(c, req, done), (resp) -> resp.getProcId(), new AddColumnFamilyProcedureBiConsumer(tableName)); } @@ -811,8 +810,9 @@ public CompletableFuture addColumnFamily( public CompletableFuture deleteColumnFamily(TableName tableName, byte[] columnFamily) { return this. procedureCall(tableName, RequestConverter.buildDeleteColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.deleteColumn(c, req, done), - (resp) -> resp.getProcId(), new DeleteColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.deleteColumn(c, req, done), (resp) -> resp.getProcId(), + new DeleteColumnFamilyProcedureBiConsumer(tableName)); } @Override @@ -820,21 +820,22 @@ public CompletableFuture modifyColumnFamily(TableName tableName, ColumnFamilyDescriptor columnFamily) { return this. 
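A sketch of the table-state checks whose completion logic is touched above (isTableEnabled / isTableAvailable), again through AsyncAdmin; the table name is a placeholder.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;

public class TableStateSketch {
  static CompletableFuture<String> describeState(AsyncAdmin admin) {
    TableName tn = TableName.valueOf("test_table"); // placeholder
    return admin.isTableEnabled(tn).thenCombine(admin.isTableAvailable(tn),
        (enabled, available) -> enabled
            ? (available ? "enabled and all regions deployed" : "enabled but not yet available")
            : "disabled");
  }
}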
procedureCall(tableName, RequestConverter.buildModifyColumnRequest(tableName, columnFamily, ng.getNonceGroup(), - ng.newNonce()), (s, c, req, done) -> s.modifyColumn(c, req, done), - (resp) -> resp.getProcId(), new ModifyColumnFamilyProcedureBiConsumer(tableName)); + ng.newNonce()), + (s, c, req, done) -> s.modifyColumn(c, req, done), (resp) -> resp.getProcId(), + new ModifyColumnFamilyProcedureBiConsumer(tableName)); } @Override public CompletableFuture modifyColumnFamilyStoreFileTracker(TableName tableName, - byte[] family, String dstSFT) { + byte[] family, String dstSFT) { return this - . procedureCall( - tableName, - RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, - ng.getNonceGroup(), ng.newNonce()), - (s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done), - (resp) -> resp.getProcId(), - new ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(tableName)); + . procedureCall( + tableName, + RequestConverter.buildModifyColumnStoreFileTrackerRequest(tableName, family, dstSFT, + ng.getNonceGroup(), ng.newNonce()), + (s, c, req, done) -> s.modifyColumnStoreFileTracker(c, req, done), + (resp) -> resp.getProcId(), + new ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(tableName)); } @Override @@ -863,37 +864,31 @@ public CompletableFuture deleteNamespace(String name) { @Override public CompletableFuture getNamespaceDescriptor(String name) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . - call(controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), - (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), (resp) - -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))).call(); + return this. newMasterCaller().action((controller, stub) -> this + . 
call( + controller, stub, RequestConverter.buildGetNamespaceDescriptorRequest(name), + (s, c, req, done) -> s.getNamespaceDescriptor(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptor(resp.getNamespaceDescriptor()))) + .call(); } @Override public CompletableFuture> listNamespaces() { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this - .> call( - controller, stub, ListNamespacesRequest.newBuilder().build(), (s, c, req, - done) -> s.listNamespaces(c, req, done), - (resp) -> resp.getNamespaceNameList())).call(); + return this.> newMasterCaller().action( + (controller, stub) -> this.> call( + controller, stub, ListNamespacesRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaces(c, req, done), (resp) -> resp.getNamespaceNameList())) + .call(); } @Override public CompletableFuture> listNamespaceDescriptors() { - return this - .> newMasterCaller().action((controller, stub) -> this - .> call(controller, stub, - ListNamespaceDescriptorsRequest.newBuilder().build(), (s, c, req, done) -> - s.listNamespaceDescriptors(c, req, done), - (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))).call(); + return this.> newMasterCaller().action((controller, stub) -> this + .> call( + controller, stub, ListNamespaceDescriptorsRequest.newBuilder().build(), + (s, c, req, done) -> s.listNamespaceDescriptors(c, req, done), + (resp) -> ProtobufUtil.toNamespaceDescriptorList(resp))) + .call(); } @Override @@ -911,14 +906,14 @@ public CompletableFuture> getRegions(ServerName serverName) { public CompletableFuture> getRegions(TableName tableName) { if (tableName.equals(META_TABLE_NAME)) { return connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) - .collect(Collectors.toList())); + .thenApply(locs -> Stream.of(locs.getRegionLocations()).map(HRegionLocation::getRegion) + .collect(Collectors.toList())); } else { - return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName) - .thenApply( - locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); + return AsyncMetaTableAccessor.getTableHRegionLocations(metaTable, tableName).thenApply( + locs -> locs.stream().map(HRegionLocation::getRegion).collect(Collectors.toList())); } } + @Override public CompletableFuture flush(TableName tableName) { return flush(tableName, null); @@ -943,8 +938,9 @@ public CompletableFuture flush(TableName tableName, byte[] columnFamily) { if (columnFamily != null) { props.put(HConstants.FAMILY_KEY_STR, Bytes.toString(columnFamily)); } - addListener(execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), - props), (ret, err3) -> { + addListener( + execProcedure(FLUSH_TABLE_PROCEDURE_SIGNATURE, tableName.getNameAsString(), props), + (ret, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { @@ -973,8 +969,8 @@ public CompletableFuture flushRegion(byte[] regionName, byte[] columnFamil } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } addListener(flush(serverName, location.getRegion(), columnFamily), (ret, err2) -> { @@ -990,14 +986,13 @@ public CompletableFuture flushRegion(byte[] regionName, byte[] columnFamil private CompletableFuture flush(final ServerName serverName, final RegionInfo 
regionInfo, byte[] columnFamily) { - return this. newAdminCaller() - .serverName(serverName) - .action( - (controller, stub) -> this. adminCall( - controller, stub, RequestConverter.buildFlushRegionRequest(regionInfo - .getRegionName(), columnFamily, false), - (s, c, req, done) -> s.flushRegion(c, req, done), resp -> null)) - .call(); + return this. newAdminCaller().serverName(serverName) + .action((controller, + stub) -> this. adminCall(controller, + stub, RequestConverter.buildFlushRegionRequest(regionInfo.getRegionName(), + columnFamily, false), + (s, c, req, done) -> s.flushRegion(c, req, done), resp -> null)) + .call(); } @Override @@ -1117,8 +1112,8 @@ private CompletableFuture compactRegion(byte[] regionName, byte[] columnFa } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } addListener(compact(location.getServerName(), location.getRegion(), major, columnFamily), @@ -1142,12 +1137,12 @@ private CompletableFuture> getTableHRegionLocations(TableN addListener(connection.registry.getMetaRegionLocations(), (metaRegions, err) -> { if (err != null) { future.completeExceptionally(err); - } else if (metaRegions == null || metaRegions.isEmpty() || - metaRegions.getDefaultRegionLocation() == null) { - future.completeExceptionally(new IOException("meta region does not found")); - } else { - future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation())); - } + } else if (metaRegions == null || metaRegions.isEmpty() + || metaRegions.getDefaultRegionLocation() == null) { + future.completeExceptionally(new IOException("meta region does not found")); + } else { + future.complete(Collections.singletonList(metaRegions.getDefaultRegionLocation())); + } }); return future; } else { @@ -1190,10 +1185,10 @@ private CompletableFuture compact(TableName tableName, byte[] columnFamily future.completeExceptionally(new TableNotFoundException(tableName)); } CompletableFuture[] compactFutures = - locations.stream().filter(l -> l.getRegion() != null) - .filter(l -> !l.getRegion().isOffline()).filter(l -> l.getServerName() != null) - .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily)) - .toArray(CompletableFuture[]::new); + locations.stream().filter(l -> l.getRegion() != null) + .filter(l -> !l.getRegion().isOffline()).filter(l -> l.getServerName() != null) + .map(l -> compact(l.getServerName(), l.getRegion(), major, columnFamily)) + .toArray(CompletableFuture[]::new); // future complete unless all of the compact futures are completed. addListener(CompletableFuture.allOf(compactFutures), (ret, err2) -> { if (err2 != null) { @@ -1215,19 +1210,19 @@ private CompletableFuture compact(TableName tableName, byte[] columnFamily */ private CompletableFuture compact(final ServerName sn, final RegionInfo hri, final boolean major, byte[] columnFamily) { - return this - . newAdminCaller() - .serverName(sn) + return this. newAdminCaller().serverName(sn) .action( - (controller, stub) -> this. adminCall( - controller, stub, RequestConverter.buildCompactRegionRequest(hri.getRegionName(), - major, columnFamily), (s, c, req, done) -> s.compactRegion(c, req, done), - resp -> null)).call(); + (controller, stub) -> this + . 
adminCall(controller, stub, + RequestConverter.buildCompactRegionRequest(hri.getRegionName(), major, + columnFamily), + (s, c, req, done) -> s.compactRegion(c, req, done), resp -> null)) + .call(); } private byte[] toEncodeRegionName(byte[] regionName) { - return RegionInfo.isEncodedRegionName(regionName) ? regionName : - Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); + return RegionInfo.isEncodedRegionName(regionName) ? regionName + : Bytes.toBytes(RegionInfo.encodeRegionName(regionName)); } private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference tableName, @@ -1247,8 +1242,8 @@ private void checkAndGetTableName(byte[] encodeRegionName, AtomicReference isSplitEnabled() { private CompletableFuture setSplitOrMergeOn(boolean enabled, boolean synchronous, MasterSwitchType switchType) { SetSplitOrMergeEnabledRequest request = - RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType); - return this. newMasterCaller() - .action((controller, stub) -> this + RequestConverter.buildSetSplitOrMergeEnabledRequest(enabled, synchronous, switchType); + return this. newMasterCaller().action((controller, stub) -> this . call(controller, stub, request, (s, c, req, done) -> s.setSplitOrMergeEnabled(c, req, done), (resp) -> resp.getPrevValueList().get(0))) - .call(); + .call(); } private CompletableFuture isSplitOrMergeOn(MasterSwitchType switchType) { IsSplitOrMergeEnabledRequest request = RequestConverter.buildIsSplitOrMergeEnabledRequest(switchType); - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, request, - (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), - (resp) -> resp.getEnabled())).call(); + return this. newMasterCaller() + .action((controller, stub) -> this + . 
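The flush/compact callers reindented above map to these AsyncAdmin calls; a sketch that simply chains the futures. The family name is a placeholder, and the single-family flush overload is only available where the running version exposes it.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushCompactSketch {
  static CompletableFuture<Void> flushThenMajorCompact(AsyncAdmin admin, TableName tn) {
    // Flush only the given family, then major-compact the whole table.
    return admin.flush(tn, Bytes.toBytes("cf"))
        .thenCompose(ignored -> admin.majorCompact(tn));
  }
}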
call(controller, + stub, request, (s, c, req, done) -> s.isSplitOrMergeEnabled(c, req, done), + (resp) -> resp.getEnabled())) + .call(); } @Override public CompletableFuture mergeRegions(List nameOfRegionsToMerge, boolean forcible) { if (nameOfRegionsToMerge.size() < 2) { return failedFuture(new IllegalArgumentException( - "Can not merge only " + nameOfRegionsToMerge.size() + " region")); + "Can not merge only " + nameOfRegionsToMerge.size() + " region")); } CompletableFuture future = new CompletableFuture<>(); byte[][] encodedNameOfRegionsToMerge = - nameOfRegionsToMerge.stream().map(this::toEncodeRegionName).toArray(byte[][]::new); + nameOfRegionsToMerge.stream().map(this::toEncodeRegionName).toArray(byte[][]::new); addListener(checkRegionsAndGetTableName(encodedNameOfRegionsToMerge), (tableName, err) -> { if (err != null) { @@ -1336,9 +1328,8 @@ public CompletableFuture mergeRegions(List nameOfRegionsToMerge, b } addListener( - this.procedureCall(tableName, request, - MasterService.Interface::mergeTableRegions, MergeTableRegionsResponse::getProcId, - new MergeTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::mergeTableRegions, + MergeTableRegionsResponse::getProcId, new MergeTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1363,10 +1354,10 @@ public CompletableFuture split(TableName tableName) { return; } addListener( - metaTable - .scanAll(new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY) - .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION)) - .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))), + metaTable.scanAll( + new Scan().setReadType(ReadType.PREAD).addFamily(HConstants.CATALOG_FAMILY) + .withStartRow(MetaTableAccessor.getTableStartRowForMeta(tableName, QueryType.REGION)) + .withStopRow(MetaTableAccessor.getTableStopRowForMeta(tableName, QueryType.REGION))), (results, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1383,8 +1374,8 @@ public CompletableFuture split(TableName tableName) { for (HRegionLocation h : rl.getRegionLocations()) { if (h != null && h.getServerName() != null) { RegionInfo hri = h.getRegion(); - if (hri == null || hri.isSplitParent() || - hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { + if (hri == null || hri.isSplitParent() + || hri.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { continue; } splitFutures.add(split(hri, null)); @@ -1394,7 +1385,7 @@ public CompletableFuture split(TableName tableName) { } addListener( CompletableFuture - .allOf(splitFutures.toArray(new CompletableFuture[splitFutures.size()])), + .allOf(splitFutures.toArray(new CompletableFuture[splitFutures.size()])), (ret, exception) -> { if (exception != null) { future.completeExceptionally(exception); @@ -1422,7 +1413,7 @@ public CompletableFuture split(TableName tableName, byte[] splitPoint) { result.completeExceptionally(err); } else if (loc == null || loc.getRegion() == null) { result.completeExceptionally(new IllegalArgumentException( - "Region does not found: rowKey=" + Bytes.toStringBinary(splitPoint))); + "Region does not found: rowKey=" + Bytes.toStringBinary(splitPoint))); } else { addListener(splitRegion(loc.getRegion().getRegionName(), splitPoint), (ret, err2) -> { if (err2 != null) { @@ -1447,15 +1438,14 @@ public CompletableFuture splitRegion(byte[] regionName) { } RegionInfo regionInfo = location.getRegion(); if 
(regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } addListener(split(regionInfo, null), (ret, err2) -> { @@ -1481,19 +1471,18 @@ public CompletableFuture splitRegion(byte[] regionName, byte[] splitPoint) } RegionInfo regionInfo = location.getRegion(); if (regionInfo.getReplicaId() != RegionInfo.DEFAULT_REPLICA_ID) { - future - .completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + - "Replicas are auto-split when their primary is split.")); + future.completeExceptionally(new IllegalArgumentException("Can't split replicas directly. " + + "Replicas are auto-split when their primary is split.")); return; } ServerName serverName = location.getServerName(); if (serverName == null) { - future - .completeExceptionally(new NoServerForRegionException(Bytes.toStringBinary(regionName))); + future.completeExceptionally( + new NoServerForRegionException(Bytes.toStringBinary(regionName))); return; } - if (regionInfo.getStartKey() != null && - Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) { + if (regionInfo.getStartKey() != null + && Bytes.compareTo(regionInfo.getStartKey(), splitPoint) == 0) { future.completeExceptionally( new IllegalArgumentException("should not give a splitkey which equals to startkey!")); return; @@ -1522,9 +1511,8 @@ private CompletableFuture split(final RegionInfo hri, byte[] splitPoint) { } addListener( - this.procedureCall(tableName, - request, MasterService.Interface::splitRegion, SplitTableRegionResponse::getProcId, - new SplitTableRegionProcedureBiConsumer(tableName)), + this.procedureCall(tableName, request, MasterService.Interface::splitRegion, + SplitTableRegionResponse::getProcId, new SplitTableRegionProcedureBiConsumer(tableName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1543,11 +1531,14 @@ public CompletableFuture assign(byte[] regionName) { future.completeExceptionally(err); return; } - addListener(this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this. call( - controller, stub, RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null))) - .call(), (ret, err2) -> { + addListener( + this. newMasterCaller().priority(regionInfo.getTable()) + .action(((controller, stub) -> this + . call(controller, stub, + RequestConverter.buildAssignRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.assignRegion(c, req, done), resp -> null))) + .call(), + (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); } else { @@ -1568,11 +1559,11 @@ public CompletableFuture unassign(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . 
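A sketch of region splitting as exposed by AsyncAdmin, matching the constraints enforced in the hunk above: only the primary replica may be split, and the split key must not equal a region start key. Names are placeholders.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitSketch {
  static CompletableFuture<Void> splitAt(AsyncAdmin admin) {
    TableName tn = TableName.valueOf("test_table"); // placeholder
    byte[] splitPoint = Bytes.toBytes("row-5000");  // must not equal a region start key
    // The region containing splitPoint is located and split; replicas follow their primary.
    return admin.split(tn, splitPoint);
  }
}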
call(controller, stub, - RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) - .call(), + .action(((controller, stub) -> this + . call(controller, stub, + RequestConverter.buildUnassignRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.unassignRegion(c, req, done), resp -> null))) + .call(), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1594,11 +1585,11 @@ public CompletableFuture offline(byte[] regionName) { } addListener( this. newMasterCaller().priority(regionInfo.getTable()) - .action(((controller, stub) -> this - . call(controller, stub, - RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), - (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) - .call(), + .action(((controller, stub) -> this + . call(controller, stub, + RequestConverter.buildOfflineRegionRequest(regionInfo.getRegionName()), + (s, c, req, done) -> s.offlineRegion(c, req, done), resp -> null))) + .call(), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1644,7 +1635,7 @@ public CompletableFuture move(byte[] regionName, ServerName destServerName } addListener( moveRegion(regionInfo, RequestConverter - .buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName)), + .buildMoveRegionRequest(regionInfo.getEncodedNameAsBytes(), destServerName)), (ret, err2) -> { if (err2 != null) { future.completeExceptionally(err2); @@ -1658,58 +1649,57 @@ public CompletableFuture move(byte[] regionName, ServerName destServerName private CompletableFuture moveRegion(RegionInfo regionInfo, MoveRegionRequest request) { return this. newMasterCaller().priority(regionInfo.getTable()) - .action( - (controller, stub) -> this. call(controller, - stub, request, (s, c, req, done) -> s.moveRegion(c, req, done), resp -> null)) - .call(); + .action( + (controller, stub) -> this. call(controller, + stub, request, (s, c, req, done) -> s.moveRegion(c, req, done), resp -> null)) + .call(); } @Override public CompletableFuture setQuota(QuotaSettings quota) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call(controller, - stub, QuotaSettings.buildSetQuotaRequestProto(quota), - (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, QuotaSettings.buildSetQuotaRequestProto(quota), + (s, c, req, done) -> s.setQuota(c, req, done), (resp) -> null)) + .call(); } @Override public CompletableFuture> getQuota(QuotaFilter filter) { CompletableFuture> future = new CompletableFuture<>(); Scan scan = QuotaTableUtil.makeScan(filter); - this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build() - .scan(scan, new AdvancedScanResultConsumer() { - List settings = new ArrayList<>(); - - @Override - public void onNext(Result[] results, ScanController controller) { - for (Result result : results) { - try { - QuotaTableUtil.parseResultToCollection(result, settings); - } catch (IOException e) { - controller.terminate(); - future.completeExceptionally(e); - } + this.connection.getTableBuilder(QuotaTableUtil.QUOTA_TABLE_NAME).build().scan(scan, + new AdvancedScanResultConsumer() { + List settings = new ArrayList<>(); + + @Override + public void onNext(Result[] results, ScanController controller) { + for (Result result : results) { + try { + QuotaTableUtil.parseResultToCollection(result, settings); + } catch (IOException e) { + controller.terminate(); + future.completeExceptionally(e); } } + } - @Override - public void onError(Throwable error) { - future.completeExceptionally(error); - } + @Override + public void onError(Throwable error) { + future.completeExceptionally(error); + } - @Override - public void onComplete() { - future.complete(settings); - } - }); + @Override + public void onComplete() { + future.complete(settings); + } + }); return future; } @Override - public CompletableFuture addReplicationPeer(String peerId, - ReplicationPeerConfig peerConfig, boolean enabled) { + public CompletableFuture addReplicationPeer(String peerId, ReplicationPeerConfig peerConfig, + boolean enabled) { return this. procedureCall( RequestConverter.buildAddReplicationPeerRequest(peerId, peerConfig, enabled), (s, c, req, done) -> s.addReplicationPeer(c, req, done), (resp) -> resp.getProcId(), @@ -1742,14 +1732,12 @@ public CompletableFuture disableReplicationPeer(String peerId) { @Override public CompletableFuture getReplicationPeerConfig(String peerId) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this - . call( - controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), ( - s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), - (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))).call(); + return this. newMasterCaller().action((controller, stub) -> this + . 
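The replication-peer callers above correspond to these AsyncAdmin methods; a sketch assuming a peer cluster reachable at the given (placeholder) ZooKeeper cluster key.

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;

public class ReplicationPeerSketch {
  static CompletableFuture<Void> addPeer(AsyncAdmin admin) {
    ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
        .setClusterKey("peer-zk:2181:/hbase") // placeholder cluster key
        .build();
    // Register peer "1" and enable it immediately.
    return admin.addReplicationPeer("1", peerConfig, true);
  }
}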
call( + controller, stub, RequestConverter.buildGetReplicationPeerConfigRequest(peerId), + (s, c, req, done) -> s.getReplicationPeerConfig(c, req, done), + (resp) -> ReplicationPeerConfigUtil.convert(resp.getPeerConfig()))) + .call(); } @Override @@ -1774,7 +1762,7 @@ public CompletableFuture appendReplicationPeerTableCFs(String id, addListener(getReplicationPeerConfig(id), (peerConfig, error) -> { if (!completeExceptionally(future, error)) { ReplicationPeerConfig newPeerConfig = - ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig); + ReplicationPeerConfigUtil.appendTableCFsToReplicationPeerConfig(tableCfs, peerConfig); addListener(updateReplicationPeerConfig(id, newPeerConfig), (result, err) -> { if (!completeExceptionally(future, error)) { future.complete(result); @@ -1798,7 +1786,7 @@ public CompletableFuture removeReplicationPeerTableCFs(String id, ReplicationPeerConfig newPeerConfig = null; try { newPeerConfig = ReplicationPeerConfigUtil - .removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id); + .removeTableCFsFromReplicationPeerConfig(tableCfs, peerConfig, id); } catch (ReplicationException e) { future.completeExceptionally(e); return; @@ -1825,17 +1813,16 @@ public CompletableFuture> listReplicationPeers( return listReplicationPeers(RequestConverter.buildListReplicationPeersRequest(pattern)); } - private CompletableFuture> listReplicationPeers( - ListReplicationPeersRequest request) { - return this - .> newMasterCaller() - .action( - (controller, stub) -> this.> call(controller, stub, request, - (s, c, req, done) -> s.listReplicationPeers(c, req, done), - (resp) -> resp.getPeerDescList().stream() - .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) - .collect(Collectors.toList()))).call(); + private CompletableFuture> + listReplicationPeers(ListReplicationPeersRequest request) { + return this.> newMasterCaller().action( + (controller, stub) -> this + .> call( + controller, stub, request, (s, c, req, done) -> s.listReplicationPeers(c, req, done), + (resp) -> resp.getPeerDescList().stream() + .map(ReplicationPeerConfigUtil::toReplicationPeerDescription) + .collect(Collectors.toList()))) + .call(); } @Override @@ -1847,10 +1834,10 @@ public CompletableFuture> listReplicatedTableCFs() { tables.forEach(table -> { Map cfs = new HashMap<>(); Stream.of(table.getColumnFamilies()) - .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) - .forEach(column -> { - cfs.put(column.getNameAsString(), column.getScope()); - }); + .filter(column -> column.getScope() != HConstants.REPLICATION_SCOPE_LOCAL) + .forEach(column -> { + cfs.put(column.getNameAsString(), column.getScope()); + }); if (!cfs.isEmpty()) { replicatedTableCFs.add(new TableCFs(table.getTableName(), cfs)); } @@ -1864,7 +1851,7 @@ public CompletableFuture> listReplicatedTableCFs() { @Override public CompletableFuture snapshot(SnapshotDescription snapshotDesc) { SnapshotProtos.SnapshotDescription snapshot = - ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc); + ProtobufUtil.createHBaseProtosSnapshotDesc(snapshotDesc); try { ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); } catch (IllegalArgumentException e) { @@ -1873,10 +1860,11 @@ public CompletableFuture snapshot(SnapshotDescription snapshotDesc) { CompletableFuture future = new CompletableFuture<>(); final SnapshotRequest request = SnapshotRequest.newBuilder().setSnapshot(snapshot).build(); addListener(this. newMasterCaller() - .action((controller, stub) -> this. 
call(controller, - stub, request, (s, c, req, done) -> s.snapshot(c, req, done), - resp -> resp.getExpectedTimeout())) - .call(), (expectedTimeout, err) -> { + .action((controller, stub) -> this. call( + controller, stub, request, (s, c, req, done) -> s.snapshot(c, req, done), + resp -> resp.getExpectedTimeout())) + .call(), + (expectedTimeout, err) -> { if (err != null) { future.completeExceptionally(err); return; @@ -1898,16 +1886,17 @@ public void run(Timeout timeout) throws Exception { } else { // retry again after pauseTime. long pauseTime = - ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); + ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); pauseTime = Math.min(pauseTime, maxPauseTime); AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime, TimeUnit.MILLISECONDS); } }); } else { - future.completeExceptionally( - new SnapshotCreationException("Snapshot '" + snapshot.getName() + - "' wasn't completed in expectedTime:" + expectedTimeout + " ms", snapshotDesc)); + future.completeExceptionally(new SnapshotCreationException( + "Snapshot '" + snapshot.getName() + "' wasn't completed in expectedTime:" + + expectedTimeout + " ms", + snapshotDesc)); } } }; @@ -1918,15 +1907,13 @@ public void run(Timeout timeout) throws Exception { @Override public CompletableFuture isSnapshotFinished(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - IsSnapshotDoneRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())).call(); + return this. newMasterCaller() + .action((controller, stub) -> this + . 
call(controller, stub, + IsSnapshotDoneRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.isSnapshotDone(c, req, done), resp -> resp.getDone())) + .call(); } @Override @@ -1957,7 +1944,7 @@ public CompletableFuture restoreSnapshot(String snapshotName, boolean take } if (tableName == null) { future.completeExceptionally(new RestoreSnapshotException( - "Unable to find the table name for snapshot=" + snapshotName)); + "Unable to find the table name for snapshot=" + snapshotName)); return; } final TableName finalTableName = tableName; @@ -1991,12 +1978,12 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t CompletableFuture future = new CompletableFuture<>(); // Step.1 Take a snapshot of the current state String failSafeSnapshotSnapshotNameFormat = - this.connection.getConfiguration().get(HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME, - HConstants.DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME); + this.connection.getConfiguration().get(HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME, + HConstants.DEFAULT_SNAPSHOT_RESTORE_FAILSAFE_NAME); final String failSafeSnapshotSnapshotName = - failSafeSnapshotSnapshotNameFormat.replace("{snapshot.name}", snapshotName) - .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) - .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); + failSafeSnapshotSnapshotNameFormat.replace("{snapshot.name}", snapshotName) + .replace("{table.name}", tableName.toString().replace(TableName.NAMESPACE_DELIM, '.')) + .replace("{restore.timestamp}", String.valueOf(EnvironmentEdgeManager.currentTime())); LOG.info("Taking restore-failsafe snapshot: " + failSafeSnapshotSnapshotName); addListener(snapshot(failSafeSnapshotSnapshotName, tableName), (ret, err) -> { if (err != null) { @@ -2007,16 +1994,14 @@ private CompletableFuture restoreSnapshot(String snapshotName, TableName t (void2, err2) -> { if (err2 != null) { // Step.3.a Something went wrong during the restore and try to rollback. - addListener( - internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, restoreAcl, - null), - (void3, err3) -> { + addListener(internalRestoreSnapshot(failSafeSnapshotSnapshotName, tableName, + restoreAcl, null), (void3, err3) -> { if (err3 != null) { future.completeExceptionally(err3); } else { String msg = - "Restore snapshot=" + snapshotName + " failed. Rollback to snapshot=" + - failSafeSnapshotSnapshotName + " succeeded."; + "Restore snapshot=" + snapshotName + " failed. 
Rollback to snapshot=" + + failSafeSnapshotSnapshotName + " succeeded."; future.completeExceptionally(new RestoreSnapshotException(msg, err2)); } }); @@ -2074,23 +2059,24 @@ public CompletableFuture cloneSnapshot(String snapshotName, TableName tabl private CompletableFuture internalRestoreSnapshot(String snapshotName, TableName tableName, boolean restoreAcl, String customSFT) { SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder() - .setName(snapshotName).setTable(tableName.getNameAsString()).build(); + .setName(snapshotName).setTable(tableName.getNameAsString()).build(); try { ClientSnapshotDescriptionUtils.assertSnapshotRequestIsValid(snapshot); } catch (IllegalArgumentException e) { return failedFuture(e); } RestoreSnapshotRequest.Builder builder = - RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) - .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); - if(customSFT != null){ + RestoreSnapshotRequest.newBuilder().setSnapshot(snapshot).setNonceGroup(ng.getNonceGroup()) + .setNonce(ng.newNonce()).setRestoreACL(restoreAcl); + if (customSFT != null) { builder.setCustomSFT(customSFT); } - return waitProcedureResult(this. newMasterCaller().action((controller, stub) -> this - . call(controller, stub, - builder.build(), - (s, c, req, done) -> s.restoreSnapshot(c, req, done), (resp) -> resp.getProcId())) - .call()); + return waitProcedureResult(this. newMasterCaller() + .action( + (controller, stub) -> this. call( + controller, stub, builder.build(), (s, c, req, done) -> s.restoreSnapshot(c, req, done), + (resp) -> resp.getProcId())) + .call()); } @Override @@ -2107,8 +2093,8 @@ public CompletableFuture> listSnapshots(Pattern patter private CompletableFuture> getCompletedSnapshots(Pattern pattern) { return this.> newMasterCaller().action((controller, stub) -> this - .> - call(controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), + .> call( + controller, stub, GetCompletedSnapshotsRequest.newBuilder().build(), (s, c, req, done) -> s.getCompletedSnapshots(c, req, done), resp -> ProtobufUtil.toSnapshotDescriptionList(resp, pattern))) .call(); @@ -2131,8 +2117,8 @@ public CompletableFuture> listTableSnapshots(Pattern t return getCompletedSnapshots(tableNamePattern, snapshotNamePattern); } - private CompletableFuture> getCompletedSnapshots( - Pattern tableNamePattern, Pattern snapshotNamePattern) { + private CompletableFuture> + getCompletedSnapshots(Pattern tableNamePattern, Pattern snapshotNamePattern) { CompletableFuture> future = new CompletableFuture<>(); addListener(listTableNames(tableNamePattern, false), (tableNames, err) -> { if (err != null) { @@ -2153,8 +2139,8 @@ private CompletableFuture> getCompletedSnapshots( return; } future.complete(snapshotDescList.stream() - .filter(snap -> (snap != null && tableNames.contains(snap.getTableName()))) - .collect(Collectors.toList())); + .filter(snap -> (snap != null && tableNames.contains(snap.getTableName()))) + .collect(Collectors.toList())); }); }); return future; @@ -2213,7 +2199,8 @@ private CompletableFuture internalDeleteSnapshots(Pattern tableNamePattern return; } addListener(CompletableFuture.allOf(snapshotDescriptions.stream() - .map(this::internalDeleteSnapshot).toArray(CompletableFuture[]::new)), (v, e) -> { + .map(this::internalDeleteSnapshot).toArray(CompletableFuture[]::new)), + (v, e) -> { if (e != null) { future.completeExceptionally(e); } else { @@ -2225,15 +2212,13 @@ private CompletableFuture internalDeleteSnapshots(Pattern 
tableNamePattern } private CompletableFuture internalDeleteSnapshot(SnapshotDescription snapshot) { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, - stub, - DeleteSnapshotRequest.newBuilder() - .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), (s, c, - req, done) -> s.deleteSnapshot(c, req, done), resp -> null)).call(); + return this. newMasterCaller() + .action((controller, stub) -> this + . call(controller, stub, + DeleteSnapshotRequest.newBuilder() + .setSnapshot(ProtobufUtil.createHBaseProtosSnapshotDesc(snapshot)).build(), + (s, c, req, done) -> s.deleteSnapshot(c, req, done), resp -> null)) + .call(); } @Override @@ -2241,12 +2226,13 @@ public CompletableFuture execProcedure(String signature, String instance, Map props) { CompletableFuture future = new CompletableFuture<>(); ProcedureDescription procDesc = - ProtobufUtil.buildProcedureDescription(signature, instance, props); + ProtobufUtil.buildProcedureDescription(signature, instance, props); addListener(this. newMasterCaller() - .action((controller, stub) -> this. call( - controller, stub, ExecProcedureRequest.newBuilder().setProcedure(procDesc).build(), - (s, c, req, done) -> s.execProcedure(c, req, done), resp -> resp.getExpectedTimeout())) - .call(), (expectedTimeout, err) -> { + .action((controller, stub) -> this. call( + controller, stub, ExecProcedureRequest.newBuilder().setProcedure(procDesc).build(), + (s, c, req, done) -> s.execProcedure(c, req, done), resp -> resp.getExpectedTimeout())) + .call(), + (expectedTimeout, err) -> { if (err != null) { future.completeExceptionally(err); return; @@ -2270,15 +2256,15 @@ public void run(Timeout timeout) throws Exception { } else { // retry again after pauseTime. long pauseTime = - ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); + ConnectionUtils.getPauseTime(TimeUnit.NANOSECONDS.toMillis(pauseNs), ++tries); pauseTime = Math.min(pauseTime, maxPauseTime); AsyncConnectionImpl.RETRY_TIMER.newTimeout(this, pauseTime, TimeUnit.MICROSECONDS); } }); } else { - future.completeExceptionally(new IOException("Procedure '" + signature + " : " + - instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); + future.completeExceptionally(new IOException("Procedure '" + signature + " : " + + instance + "' wasn't completed in expectedTime:" + expectedTimeout + " ms")); } } }; @@ -2326,61 +2312,56 @@ public CompletableFuture abortProcedure(long procId, boolean mayInterru @Override public CompletableFuture getProcedures() { - return this - . newMasterCaller() + return this. newMasterCaller() .action( - (controller, stub) -> this - . call( - controller, stub, GetProceduresRequest.newBuilder().build(), - (s, c, req, done) -> s.getProcedures(c, req, done), - resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))).call(); + (controller, stub) -> this. call( + controller, stub, GetProceduresRequest.newBuilder().build(), + (s, c, req, done) -> s.getProcedures(c, req, done), + resp -> ProtobufUtil.toProcedureJson(resp.getProcedureList()))) + .call(); } @Override public CompletableFuture getLocks() { - return this - . newMasterCaller() - .action( - (controller, stub) -> this. call( - controller, stub, GetLocksRequest.newBuilder().build(), - (s, c, req, done) -> s.getLocks(c, req, done), - resp -> ProtobufUtil.toLockJson(resp.getLockList()))).call(); + return this. newMasterCaller() + .action((controller, stub) -> this. 
call( + controller, stub, GetLocksRequest.newBuilder().build(), + (s, c, req, done) -> s.getLocks(c, req, done), + resp -> ProtobufUtil.toLockJson(resp.getLockList()))) + .call(); } @Override - public CompletableFuture decommissionRegionServers( - List servers, boolean offload) { + public CompletableFuture decommissionRegionServers(List servers, + boolean offload) { return this. newMasterCaller() .action((controller, stub) -> this - . call( - controller, stub, + . call( + controller, stub, RequestConverter.buildDecommissionRegionServersRequest(servers, offload), - (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) + (s, c, req, done) -> s.decommissionRegionServers(c, req, done), resp -> null)) .call(); } @Override public CompletableFuture> listDecommissionedRegionServers() { - return this.> newMasterCaller() - .action((controller, stub) -> this - .> call( - controller, stub, ListDecommissionedRegionServersRequest.newBuilder().build(), - (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), - resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()))) + return this.> newMasterCaller().action((controller, stub) -> this + .> call( + controller, stub, ListDecommissionedRegionServersRequest.newBuilder().build(), + (s, c, req, done) -> s.listDecommissionedRegionServers(c, req, done), + resp -> resp.getServerNameList().stream().map(ProtobufUtil::toServerName) + .collect(Collectors.toList()))) .call(); } @Override public CompletableFuture recommissionRegionServer(ServerName server, List encodedRegionNames) { - return this. newMasterCaller() - .action((controller, stub) -> - this. call( - controller, stub, RequestConverter.buildRecommissionRegionServerRequest( - server, encodedRegionNames), (s, c, req, done) -> s.recommissionRegionServer( - c, req, done), resp -> null)).call(); + return this. newMasterCaller().action((controller, stub) -> this + . 
call(controller, + stub, RequestConverter.buildRecommissionRegionServerRequest(server, encodedRegionNames), + (s, c, req, done) -> s.recommissionRegionServer(c, req, done), resp -> null)) + .call(); } /** @@ -2401,8 +2382,8 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (encodedName.length() < RegionInfo.MD5_HEX_LENGTH) { // old format encodedName, should be meta region future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getEncodedName().equals(encodedName)).findFirst()); } else { future = AsyncMetaTableAccessor.getRegionLocationWithEncodedName(metaTable, regionNameOrEncodedRegionName); @@ -2419,12 +2400,11 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR if (regionInfo.isMetaRegion()) { future = connection.registry.getMetaRegionLocations() - .thenApply(locs -> Stream.of(locs.getRegionLocations()) - .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) - .findFirst()); + .thenApply(locs -> Stream.of(locs.getRegionLocations()) + .filter(loc -> loc.getRegion().getReplicaId() == regionInfo.getReplicaId()) + .findFirst()); } else { - future = - AsyncMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); + future = AsyncMetaTableAccessor.getRegionLocation(metaTable, regionNameOrEncodedRegionName); } } @@ -2436,8 +2416,8 @@ CompletableFuture getRegionLocation(byte[] regionNameOrEncodedR } if (!location.isPresent() || location.get().getRegion() == null) { returnedFuture.completeExceptionally( - new UnknownRegionException("Invalid region name or encoded region name: " + - Bytes.toStringBinary(regionNameOrEncodedRegionName))); + new UnknownRegionException("Invalid region name or encoded region name: " + + Bytes.toStringBinary(regionNameOrEncodedRegionName))); } else { returnedFuture.complete(location.get()); } @@ -2457,9 +2437,9 @@ private CompletableFuture getRegionInfo(byte[] regionNameOrEncodedRe } if (Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) || - Bytes.equals(regionNameOrEncodedRegionName, - RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { + RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionName()) + || Bytes.equals(regionNameOrEncodedRegionName, + RegionInfoBuilder.FIRST_META_REGIONINFO.getEncodedNameAsBytes())) { return CompletableFuture.completedFuture(RegionInfoBuilder.FIRST_META_REGIONINFO); } @@ -2596,7 +2576,7 @@ String getOperationType() { } private static class ModifyTableStoreFileTrackerProcedureBiConsumer - extends TableProcedureBiConsumer { + extends TableProcedureBiConsumer { ModifyTableStoreFileTrackerProcedureBiConsumer(AsyncAdmin admin, TableName tableName) { super(tableName); @@ -2699,7 +2679,7 @@ String getOperationType() { } private static class ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer - extends TableProcedureBiConsumer { + extends TableProcedureBiConsumer { ModifyColumnFamilyStoreFileTrackerProcedureBiConsumer(TableName tableName) { super(tableName); @@ -2809,12 +2789,11 @@ private CompletableFuture waitProcedureResult(CompletableFuture proc private void getProcedureResult(long procId, CompletableFuture future, int retries) { addListener( - this. newMasterCaller() - .action((controller, stub) -> this + this. newMasterCaller().action((controller, stub) -> this . 
call( controller, stub, GetProcedureResultRequest.newBuilder().setProcId(procId).build(), (s, c, req, done) -> s.getProcedureResult(c, req, done), (resp) -> resp)) - .call(), + .call(), (response, error) -> { if (error != null) { LOG.warn("failed to get the procedure result procId={}", procId, @@ -2858,54 +2837,51 @@ public CompletableFuture getClusterMetrics() { @Override public CompletableFuture getClusterMetrics(EnumSet
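The RawAsyncHBaseAdmin hunks above include the failsafe-restore path: a failsafe snapshot is taken before the restore and, if the restore fails, it is rolled back to. A minimal client-side sketch of driving that path through the public AsyncAdmin API; the snapshot name "my_snapshot" and the default configuration are assumptions for illustration only.

  import java.util.concurrent.CompletableFuture;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.client.AsyncAdmin;
  import org.apache.hadoop.hbase.client.AsyncConnection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class RestoreSnapshotWithFailsafe {
    public static void main(String[] args) throws Exception {
      CompletableFuture<AsyncConnection> connFuture =
          ConnectionFactory.createAsyncConnection(HBaseConfiguration.create());
      try (AsyncConnection conn = connFuture.get()) {
        AsyncAdmin admin = conn.getAdmin();
        // takeFailSafeSnapshot = true drives the code path in the hunk above: a failsafe
        // snapshot is taken first, the restore is attempted, and a failure triggers a
        // rollback to that failsafe snapshot.
        admin.restoreSnapshot("my_snapshot", true).get();
      }
    }
  }

The failsafe snapshot's name comes from the HConstants.SNAPSHOT_RESTORE_FAILSAFE_NAME format shown in the hunk, with the {snapshot.name}, {table.name} and {restore.timestamp} placeholders substituted.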
 *   ROW-KEY              FAM/QUAL              DATA                      DESC
 *   n.<namespace>        q:s                   <global-quotas>
 *   n.<namespace>        u:p                   <namespace-quota policy>
 *   n.<namespace>        u:s                   <SpaceQuotaSnapshot>      The size of all snapshots against tables in the namespace
 *   t.<table>            q:s                   <global-quotas>
 *   t.<table>            u:p                   <table-quota policy>
 *   t.<table>            u:ss.<snapshot name>  <SpaceQuotaSnapshot>      The size of a snapshot against a table
 *   u.<user>             q:s                   <global-quotas>
 *   u.<user>             q:s.<table>           <table-quotas>
 *   u.<user>             q:s.<ns>              <namespace-quotas>
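The layout above belongs to the QuotaTableUtil class javadoc being reformatted in this hunk. As a rough usage sketch (not part of the change, and noting that QuotaTableUtil is @InterfaceAudience.Private rather than a supported client API), the per-table space-quota snapshots can be read back with the getSnapshots helper that appears later in this diff, assuming a default cluster configuration:

  import java.util.Map;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.quotas.QuotaTableUtil;
  import org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot;

  public class PrintSpaceQuotaSnapshots {
    public static void main(String[] args) throws Exception {
      try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
        // Scans hbase:quota and deserializes the stored SpaceQuotaSnapshot cells.
        Map<TableName, SpaceQuotaSnapshot> snapshots = QuotaTableUtil.getSnapshots(conn);
        snapshots.forEach((table, snapshot) -> System.out
            .println(table + " usage=" + snapshot.getUsage() + " limit=" + snapshot.getLimit()));
      }
    }
  }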
    */ @InterfaceAudience.Private @@ -116,8 +155,9 @@ public class QuotaTableUtil { */ public static final String QUOTA_REGION_SERVER_ROW_KEY = "all"; - /* ========================================================================= - * Quota "settings" helpers + /* + * ========================================================================= Quota "settings" + * helpers */ public static Quotas getTableQuota(final Connection connection, final TableName table) throws IOException { @@ -188,10 +228,10 @@ public static Get makeGetForUserQuotas(final String user, final Iterable namespaces) { Get get = new Get(getUserRowKey(user)); get.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - for (final TableName table: tables) { + for (final TableName table : tables) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserTable(table)); } - for (final String ns: namespaces) { + for (final String ns : namespaces) { get.addColumn(QUOTA_FAMILY_INFO, getSettingsQualifierForUserNamespace(ns)); } return get; @@ -219,9 +259,8 @@ public static Filter makeFilter(final QuotaFilter filter) { FilterList nsFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); nsFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); + nsFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserNamespace(filter.getNamespaceFilter()), 0))); userFilters.addFilter(nsFilters); hasFilter = true; } @@ -229,9 +268,8 @@ public static Filter makeFilter(final QuotaFilter filter) { FilterList tableFilters = new FilterList(FilterList.Operator.MUST_PASS_ALL); tableFilters.addFilter(new RowFilter(CompareOperator.EQUAL, new RegexStringComparator(getUserRowKeyRegex(filter.getUserFilter()), 0))); - tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, - new RegexStringComparator( - getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); + tableFilters.addFilter(new QualifierFilter(CompareOperator.EQUAL, new RegexStringComparator( + getSettingsQualifierRegexForUserTable(filter.getTableFilter()), 0))); userFilters.addFilter(tableFilters); hasFilter = true; } @@ -263,12 +301,12 @@ public static Scan makeQuotaSnapshotScan() { /** * Fetches all {@link SpaceQuotaSnapshot} objects from the {@code hbase:quota} table. - * * @param conn The HBase connection * @return A map of table names and their computed snapshot. */ - public static Map getSnapshots(Connection conn) throws IOException { - Map snapshots = new HashMap<>(); + public static Map getSnapshots(Connection conn) + throws IOException { + Map snapshots = new HashMap<>(); try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(makeQuotaSnapshotScan())) { for (Result r : rs) { @@ -311,15 +349,14 @@ public static Get makeQuotaSnapshotGetForTable(TableName tn) { /** * Extracts the {@link SpaceViolationPolicy} and {@link TableName} from the provided - * {@link Result} and adds them to the given {@link Map}. If the result does not contain - * the expected information or the serialized policy in the value is invalid, this method - * will throw an {@link IllegalArgumentException}. - * + * {@link Result} and adds them to the given {@link Map}. 
If the result does not contain the + * expected information or the serialized policy in the value is invalid, this method will throw + * an {@link IllegalArgumentException}. * @param result A row from the quota table. * @param snapshots A map of snapshots to add the result of this method into. */ - public static void extractQuotaSnapshot( - Result result, Map snapshots) { + public static void extractQuotaSnapshot(Result result, + Map snapshots) { byte[] row = Objects.requireNonNull(result).getRow(); if (row == null || row.length == 0) { throw new IllegalArgumentException("Provided result had a null row"); @@ -330,8 +367,8 @@ public static void extractQuotaSnapshot( throw new IllegalArgumentException("Result did not contain the expected column " + QUOTA_POLICY_COLUMN + ", " + result.toString()); } - ByteString buffer = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString buffer = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); try { QuotaProtos.SpaceQuotaSnapshot snapshot = QuotaProtos.SpaceQuotaSnapshot.parseFrom(buffer); snapshots.put(targetTableName, SpaceQuotaSnapshot.toSpaceQuotaSnapshot(snapshot)); @@ -342,27 +379,25 @@ public static void extractQuotaSnapshot( } public static interface UserQuotasVisitor { - void visitUserQuotas(final String userName, final Quotas quotas) - throws IOException; + void visitUserQuotas(final String userName, final Quotas quotas) throws IOException; + void visitUserQuotas(final String userName, final TableName table, final Quotas quotas) - throws IOException; + throws IOException; + void visitUserQuotas(final String userName, final String namespace, final Quotas quotas) - throws IOException; + throws IOException; } public static interface TableQuotasVisitor { - void visitTableQuotas(final TableName tableName, final Quotas quotas) - throws IOException; + void visitTableQuotas(final TableName tableName, final Quotas quotas) throws IOException; } public static interface NamespaceQuotasVisitor { - void visitNamespaceQuotas(final String namespace, final Quotas quotas) - throws IOException; + void visitNamespaceQuotas(final String namespace, final Quotas quotas) throws IOException; } private static interface RegionServerQuotasVisitor { - void visitRegionServerQuotas(final String regionServer, final Quotas quotas) - throws IOException; + void visitRegionServerQuotas(final String regionServer, final Quotas quotas) throws IOException; } public static interface QuotasVisitor extends UserQuotasVisitor, TableQuotasVisitor, @@ -426,8 +461,8 @@ public void visitRegionServerQuotas(String regionServer, Quotas quotas) { }); } - public static void parseNamespaceResult(final Result result, - final NamespaceQuotasVisitor visitor) throws IOException { + public static void parseNamespaceResult(final Result result, final NamespaceQuotasVisitor visitor) + throws IOException { String namespace = getNamespaceFromRowKey(result.getRow()); parseNamespaceResult(namespace, result, visitor); } @@ -482,7 +517,7 @@ protected static void parseUserResult(final String userName, final Result result Map familyMap = result.getFamilyMap(QUOTA_FAMILY_INFO); if (familyMap == null || familyMap.isEmpty()) return; - for (Map.Entry entry: familyMap.entrySet()) { + for (Map.Entry entry : familyMap.entrySet()) { Quotas quotas = quotasFromData(entry.getValue()); if (Bytes.startsWith(entry.getKey(), QUOTA_QUALIFIER_SETTINGS_PREFIX)) { String name = Bytes.toString(entry.getKey(), 
QUOTA_QUALIFIER_SETTINGS_PREFIX.length); @@ -505,9 +540,8 @@ protected static void parseUserResult(final String userName, final Result result */ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot snapshot) { Put p = new Put(getTableRowKey(tableName)); - p.addColumn( - QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, - SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); + p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY, + SpaceQuotaSnapshot.toProtoSnapshot(snapshot).toByteArray()); return p; } @@ -516,23 +550,22 @@ static Put createPutForSpaceSnapshot(TableName tableName, SpaceQuotaSnapshot sna */ static Get makeGetForSnapshotSize(TableName tn, String snapshot) { Get g = new Get(Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(tn.toString()))); - g.addColumn( - QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + g.addColumn(QUOTA_FAMILY_USAGE, + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); return g; } /** - * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to - * the given {@code table}. + * Creates a {@link Put} to persist the current size of the {@code snapshot} with respect to the + * given {@code table}. */ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long size) { // We just need a pb message with some `long usage`, so we can just reuse the // SpaceQuotaSnapshot message instead of creating a new one. Put p = new Put(getTableRowKey(tableName)); p.addColumn(QUOTA_FAMILY_USAGE, getSnapshotSizeQualifier(snapshot), - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } @@ -542,14 +575,14 @@ static Put createPutForSnapshotSize(TableName tableName, String snapshot, long s static Put createPutForNamespaceSnapshotSize(String namespace, long size) { Put p = new Put(getNamespaceRowKey(namespace)); p.addColumn(QUOTA_FAMILY_USAGE, QUOTA_SNAPSHOT_SIZE_QUALIFIER, - org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot - .newBuilder().setQuotaUsage(size).build().toByteArray()); + org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuotaSnapshot.newBuilder() + .setQuotaUsage(size).build().toByteArray()); return p; } /** - * Returns a list of {@code Delete} to remove given table snapshot - * entries to remove from quota table + * Returns a list of {@code Delete} to remove given table snapshot entries to remove from quota + * table * @param snapshotEntriesToRemove the entries to remove */ static List createDeletesForExistingTableSnapshotSizes( @@ -560,7 +593,7 @@ static List createDeletesForExistingTableSnapshotSizes( for (String snapshot : entry.getValue()) { Delete d = new Delete(getTableRowKey(entry.getKey())); d.addColumns(QUOTA_FAMILY_USAGE, - Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); + Bytes.add(QUOTA_SNAPSHOT_SIZE_QUALIFIER, Bytes.toBytes(snapshot))); deletes.add(d); } } @@ -577,12 +610,12 @@ static List createDeletesForExistingTableSnapshotSizes(Connection connec } /** - * Returns a list of {@code Delete} to remove given namespace snapshot - * entries to removefrom quota table + * Returns a list of {@code Delete} to remove given namespace snapshot entries to removefrom quota + * table * @param snapshotEntriesToRemove the 
entries to remove */ - static List createDeletesForExistingNamespaceSnapshotSizes( - Set snapshotEntriesToRemove) { + static List + createDeletesForExistingNamespaceSnapshotSizes(Set snapshotEntriesToRemove) { List deletes = new ArrayList<>(); for (String snapshot : snapshotEntriesToRemove) { Delete d = new Delete(getNamespaceRowKey(snapshot)); @@ -599,7 +632,7 @@ static List createDeletesForExistingNamespaceSnapshotSizes( static List createDeletesForExistingNamespaceSnapshotSizes(Connection connection) throws IOException { return createDeletesForExistingSnapshotsFromScan(connection, - createScanForNamespaceSnapshotSizes()); + createScanForNamespaceSnapshotSizes()); } /** @@ -634,23 +667,23 @@ static List createDeletesForExistingSnapshotsFromScan(Connection connect * @param namespace the namespace to fetch the list of table usage snapshots */ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String namespace) - throws IOException { + throws IOException { Scan s = new Scan(); - //Get rows for all tables in namespace + // Get rows for all tables in namespace s.setStartStopRowForPrefixScan( Bytes.add(QUOTA_TABLE_ROW_KEY_PREFIX, Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM))); - //Scan for table usage column (u:p) in quota table - s.addColumn(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); - //Scan for table quota column (q:s) if table has a space quota defined - s.addColumn(QUOTA_FAMILY_INFO,QUOTA_QUALIFIER_SETTINGS); + // Scan for table usage column (u:p) in quota table + s.addColumn(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); + // Scan for table quota column (q:s) if table has a space quota defined + s.addColumn(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME); - ResultScanner rs = quotaTable.getScanner(s)) { + ResultScanner rs = quotaTable.getScanner(s)) { for (Result r : rs) { byte[] data = r.getValue(QUOTA_FAMILY_INFO, QUOTA_QUALIFIER_SETTINGS); - //if table does not have a table space quota defined, delete table usage column (u:p) + // if table does not have a table space quota defined, delete table usage column (u:p) if (data == null) { Delete delete = new Delete(r.getRow()); - delete.addColumns(QUOTA_FAMILY_USAGE,QUOTA_QUALIFIER_POLICY); + delete.addColumns(QUOTA_FAMILY_USAGE, QUOTA_QUALIFIER_POLICY); quotaTable.delete(delete); } } @@ -660,8 +693,7 @@ static void deleteTableUsageSnapshotsForNamespace(Connection connection, String /** * Fetches the computed size of all snapshots against tables in a namespace for space quotas. */ - static long getNamespaceSnapshotSize( - Connection conn, String namespace) throws IOException { + static long getNamespaceSnapshotSize(Connection conn, String namespace) throws IOException { try (Table quotaTable = conn.getTable(QuotaTableUtil.QUOTA_TABLE_NAME)) { Result r = quotaTable.get(createGetNamespaceSnapshotSize(namespace)); if (r.isEmpty()) { @@ -687,8 +719,8 @@ static Get createGetNamespaceSnapshotSize(String namespace) { * Parses the snapshot size from the given Cell's value. 
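The scan helpers reformatted around here (createScanForSpaceSnapshotSizes and parseSnapshotSize just below) restrict the quota table to the usage family and the snapshot-size qualifier prefix. A minimal sketch of that filter pattern; the "u" family, the "ss." prefix and the hbase:quota table name are assumptions taken from the class javadoc above.

  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.Result;
  import org.apache.hadoop.hbase.client.ResultScanner;
  import org.apache.hadoop.hbase.client.Scan;
  import org.apache.hadoop.hbase.client.Table;
  import org.apache.hadoop.hbase.filter.ColumnPrefixFilter;
  import org.apache.hadoop.hbase.util.Bytes;

  public class ScanSnapshotSizes {
    static void scanSizes(Connection conn) throws java.io.IOException {
      // Only the usage family, and only qualifiers starting with the snapshot-size prefix.
      Scan scan = new Scan().addFamily(Bytes.toBytes("u"))
          .setFilter(new ColumnPrefixFilter(Bytes.toBytes("ss.")));
      try (Table quota = conn.getTable(TableName.valueOf("hbase:quota"));
          ResultScanner rs = quota.getScanner(scan)) {
        for (Result r : rs) {
          // Each matching cell holds a serialized SpaceQuotaSnapshot carrying just the size,
          // which parseSnapshotSize(Cell) in the hunk below extracts as quotaUsage.
        }
      }
    }
  }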
*/ static long parseSnapshotSize(Cell c) throws InvalidProtocolBufferException { - ByteString bs = UnsafeByteOperations.unsafeWrap( - c.getValueArray(), c.getValueOffset(), c.getValueLength()); + ByteString bs = + UnsafeByteOperations.unsafeWrap(c.getValueArray(), c.getValueOffset(), c.getValueLength()); return QuotaProtos.SpaceQuotaSnapshot.parseFrom(bs).getQuotaUsage(); } @@ -737,22 +769,21 @@ static Scan createScanForSpaceSnapshotSizes(TableName table) { } // Just the usage family and only the snapshot size qualifiers - return s.addFamily(QUOTA_FAMILY_USAGE).setFilter( - new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); + return s.addFamily(QUOTA_FAMILY_USAGE) + .setFilter(new ColumnPrefixFilter(QUOTA_SNAPSHOT_SIZE_QUALIFIER)); } /** * Fetches any persisted HBase snapshot sizes stored in the quota table. The sizes here are - * computed relative to the table which the snapshot was created from. A snapshot's size will - * not include the size of files which the table still refers. These sizes, in bytes, are what - * is used internally to compute quota violation for tables and namespaces. - * + * computed relative to the table which the snapshot was created from. A snapshot's size will not + * include the size of files which the table still refers. These sizes, in bytes, are what is used + * internally to compute quota violation for tables and namespaces. * @return A map of snapshot name to size in bytes per space quota computations */ - public static Map getObservedSnapshotSizes(Connection conn) throws IOException { + public static Map getObservedSnapshotSizes(Connection conn) throws IOException { try (Table quotaTable = conn.getTable(QUOTA_TABLE_NAME); ResultScanner rs = quotaTable.getScanner(createScanForSpaceSnapshotSizes())) { - final Map snapshotSizes = new HashMap<>(); + final Map snapshotSizes = new HashMap<>(); for (Result r : rs) { CellScanner cs = r.cellScanner(); while (cs.advance()) { @@ -828,15 +859,16 @@ public static SpaceQuotaSnapshot getCurrentSnapshotFromQuotaTable(Connection con } } - /* ========================================================================= - * Quotas protobuf helpers + /* + * ========================================================================= Quotas protobuf + * helpers */ protected static Quotas quotasFromData(final byte[] data) throws IOException { return quotasFromData(data, 0, data.length); } - protected static Quotas quotasFromData( - final byte[] data, int offset, int length) throws IOException { + protected static Quotas quotasFromData(final byte[] data, int offset, int length) + throws IOException { int magicLen = ProtobufMagic.lengthOfPBMagic(); if (!ProtobufMagic.isPBMagicPrefix(data, offset, magicLen)) { throw new IOException("Missing pb magic prefix"); @@ -863,11 +895,10 @@ public static boolean isEmptyQuota(final Quotas quotas) { return !hasSettings; } - /* ========================================================================= - * HTable helpers + /* + * ========================================================================= HTable helpers */ - protected static Result doGet(final Connection connection, final Get get) - throws IOException { + protected static Result doGet(final Connection connection, final Get get) throws IOException { try (Table table = connection.getTable(QUOTA_TABLE_NAME)) { return table.get(get); } @@ -880,8 +911,9 @@ protected static Result[] doGet(final Connection connection, final List get } } - /* ========================================================================= - * Quota table row key 
helpers + /* + * ========================================================================= Quota table row key + * helpers */ protected static byte[] getUserRowKey(final String user) { return Bytes.add(QUOTA_USER_ROW_KEY_PREFIX, Bytes.toBytes(user)); @@ -905,7 +937,7 @@ protected static byte[] getSettingsQualifierForUserTable(final TableName tableNa protected static byte[] getSettingsQualifierForUserNamespace(final String namespace) { return Bytes.add(QUOTA_QUALIFIER_SETTINGS_PREFIX, - Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); + Bytes.toBytes(namespace + TableName.NAMESPACE_DELIM)); } protected static String getUserRowKeyRegex(final String user) { @@ -933,13 +965,13 @@ private static String getRowKeyRegEx(final byte[] prefix, final String regex) { } protected static String getSettingsQualifierRegexForUserTable(final String table) { - return '^' + Pattern.quote(Bytes.toString(QUOTA_QUALIFIER_SETTINGS_PREFIX)) + - table + "(?> getTableCFsMap() { * {@link ReplicationPeerConfigBuilder#setTableCFsMap(Map)} instead. */ @Deprecated - public ReplicationPeerConfig setTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfig + setTableCFsMap(Map> tableCFsMap) { this.tableCFsMap = tableCFsMap; return this; } @@ -195,8 +194,8 @@ public Map> getExcludeTableCFsMap() { * {@link ReplicationPeerConfigBuilder#setExcludeTableCFsMap(Map)} instead. */ @Deprecated - public ReplicationPeerConfig setExcludeTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfig + setExcludeTableCFsMap(Map> tableCFsMap) { this.excludeTableCFsMap = tableCFsMap; return this; } @@ -226,13 +225,13 @@ public boolean isSerial() { public static ReplicationPeerConfigBuilder newBuilder(ReplicationPeerConfig peerConfig) { ReplicationPeerConfigBuilderImpl builder = new ReplicationPeerConfigBuilderImpl(); builder.setClusterKey(peerConfig.getClusterKey()) - .setReplicationEndpointImpl(peerConfig.getReplicationEndpointImpl()) - .putAllPeerData(peerConfig.getPeerData()).putAllConfiguration(peerConfig.getConfiguration()) - .setTableCFsMap(peerConfig.getTableCFsMap()).setNamespaces(peerConfig.getNamespaces()) - .setReplicateAllUserTables(peerConfig.replicateAllUserTables()) - .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap()) - .setExcludeNamespaces(peerConfig.getExcludeNamespaces()) - .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial()); + .setReplicationEndpointImpl(peerConfig.getReplicationEndpointImpl()) + .putAllPeerData(peerConfig.getPeerData()).putAllConfiguration(peerConfig.getConfiguration()) + .setTableCFsMap(peerConfig.getTableCFsMap()).setNamespaces(peerConfig.getNamespaces()) + .setReplicateAllUserTables(peerConfig.replicateAllUserTables()) + .setExcludeTableCFsMap(peerConfig.getExcludeTableCFsMap()) + .setExcludeNamespaces(peerConfig.getExcludeNamespaces()) + .setBandwidth(peerConfig.getBandwidth()).setSerial(peerConfig.isSerial()); return builder; } @@ -292,8 +291,7 @@ public ReplicationPeerConfigBuilder putPeerData(byte[] key, byte[] value) { } @Override - public ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap) { + public ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap) { this.tableCFsMap = tableCFsMap; return this; } @@ -382,9 +380,9 @@ public boolean needToReplicate(TableName table) { * this peer config. * @param table name of the table * @param family family name - * @return true if (the family of) the table need replicate to the peer cluster. 
- * If passed family is null, return true if any CFs of the table need replicate; - * If passed family is not null, return true if the passed family need replicate. + * @return true if (the family of) the table need replicate to the peer cluster. If passed family + * is null, return true if any CFs of the table need replicate; If passed family is not + * null, return true if the passed family need replicate. */ public boolean needToReplicate(TableName table, byte[] family) { String namespace = table.getNamespaceAsString(); @@ -401,9 +399,9 @@ public boolean needToReplicate(TableName table, byte[] family) { // If cfs is null or empty then we can make sure that we do not need to replicate this table, // otherwise, we may still need to replicate the table but filter out some families. return cfs != null && !cfs.isEmpty() - // If exclude-table-cfs contains passed family then we make sure that we do not need to - // replicate this family. - && (family == null || !cfs.contains(Bytes.toString(family))); + // If exclude-table-cfs contains passed family then we make sure that we do not need to + // replicate this family. + && (family == null || !cfs.contains(Bytes.toString(family))); } else { // Not replicate all user tables, so filter by namespaces and table-cfs config if (namespaces == null && tableCFsMap == null) { @@ -417,9 +415,9 @@ public boolean needToReplicate(TableName table, byte[] family) { // If table-cfs contains this table then we can make sure that we need replicate some CFs of // this table. Further we need all CFs if tableCFsMap.get(table) is null or empty. return tableCFsMap != null && tableCFsMap.containsKey(table) - && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table)) - // If table-cfs must contain passed family then we need to replicate this family. - || tableCFsMap.get(table).contains(Bytes.toString(family))); + && (family == null || CollectionUtils.isEmpty(tableCFsMap.get(table)) + // If table-cfs must contain passed family then we need to replicate this family. + || tableCFsMap.get(table).contains(Bytes.toString(family))); } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java index 180239b93bf0..0a8b5da44153 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.hbase.TableName; import org.apache.yetus.audience.InterfaceAudience; @@ -60,7 +58,6 @@ public interface ReplicationPeerConfigBuilder { @InterfaceAudience.Private ReplicationPeerConfigBuilder removeConfiguration(String key); - /** * Adds all of the provided "raw" configuration entries to {@code this}. 
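The scoping logic reformatted above (needToReplicate together with the table-CF and exclude maps) is driven by ReplicationPeerConfig. A small sketch of building a peer config that replicates only selected column families; the cluster key and table names are placeholders.

  import java.util.Arrays;
  import java.util.Collections;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.replication.ReplicationPeerConfig;
  import org.apache.hadoop.hbase.util.Bytes;

  public class BuildPeerConfig {
    public static void main(String[] args) {
      Map<TableName, List<String>> tableCfs = new HashMap<>();
      tableCfs.put(TableName.valueOf("default:orders"), Arrays.asList("cf1", "cf2"));
      tableCfs.put(TableName.valueOf("default:users"), Collections.emptyList()); // all CFs
      ReplicationPeerConfig peerConfig = ReplicationPeerConfig.newBuilder()
          .setClusterKey("zk1,zk2,zk3:2181:/hbase") // placeholder peer cluster key
          .setReplicateAllUserTables(false)
          .setTableCFsMap(tableCfs)
          .build();
      // True: "orders"/"cf1" is listed in the table-CF map, so it needs to be replicated.
      System.out.println(
          peerConfig.needToReplicate(TableName.valueOf("default:orders"), Bytes.toBytes("cf1")));
    }
  }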
* @param configuration A collection of raw configuration entries @@ -90,17 +87,15 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData } /** - * Sets an explicit map of tables and column families in those tables that should be replicated - * to the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables - * to a peer. - * + * Sets an explicit map of tables and column families in those tables that should be replicated to + * the given peer. Use {@link #setReplicateAllUserTables(boolean)} to replicate all tables to a + * peer. * @param tableCFsMap A map from tableName to column family names. An empty collection can be - * passed to indicate replicating all column families. + * passed to indicate replicating all column families. * @return {@code this} * @see #setReplicateAllUserTables(boolean) */ - ReplicationPeerConfigBuilder - setTableCFsMap(Map> tableCFsMap); + ReplicationPeerConfigBuilder setTableCFsMap(Map> tableCFsMap); /** * Sets a unique collection of HBase namespaces that should be replicated to this peer. @@ -125,12 +120,11 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData ReplicationPeerConfigBuilder setReplicateAllUserTables(boolean replicateAllUserTables); /** - * Sets the mapping of table name to column families which should not be replicated. This - * method sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this - * method is only relevant when all user tables are being replicated. - * - * @param tableCFsMap A mapping of table names to column families which should not be - * replicated. An empty list of column families implies all families for the table. + * Sets the mapping of table name to column families which should not be replicated. This method + * sets state which is mutually exclusive to {@link #setTableCFsMap(Map)}. Invoking this method is + * only relevant when all user tables are being replicated. + * @param tableCFsMap A mapping of table names to column families which should not be replicated. + * An empty list of column families implies all families for the table. * @return {@code this}. */ ReplicationPeerConfigBuilder setExcludeTableCFsMap(Map> tableCFsMap); @@ -140,7 +134,6 @@ default ReplicationPeerConfigBuilder putAllPeerData(Map peerData * configured to be replicated. This method sets state which is mutually exclusive to * {@link #setNamespaces(Set)}. Invoking this method is only relevant when all user tables are * being replicated. - * * @param namespaces A set of namespaces whose tables should not be replicated. * @return {@code this} */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java index ba97d07e7854..c2a21e85758f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java index b1f0861e3512..dca62ad513e7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AbstractHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -58,7 +56,7 @@ public abstract class AbstractHBaseSaslRpcClient { protected AbstractHBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, Token token, InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed) - throws IOException { + throws IOException { this(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, "authentication"); } @@ -80,11 +78,11 @@ protected AbstractHBaseSaslRpcClient(Configuration conf, this.fallbackAllowed = fallbackAllowed; saslProps = SaslUtil.initSaslProperties(rpcProtection); - saslClient = provider.createClient( - conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); + saslClient = + provider.createClient(conf, serverAddr, securityInfo, token, fallbackAllowed, saslProps); if (saslClient == null) { - throw new IOException("Authentication provider " + provider.getClass() - + " returned a null SaslClient"); + throw new IOException( + "Authentication provider " + provider.getClass() + " returned a null SaslClient"); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java index 259a0a4d651d..873132899d98 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AccessDeniedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.yetus.audience.InterfaceAudience; - /** * Exception thrown by access-related methods. 
*/ @@ -33,7 +32,7 @@ public AccessDeniedException() { } public AccessDeniedException(Class clazz, String s) { - super( "AccessDenied [" + clazz.getName() + "]: " + s); + super("AccessDenied [" + clazz.getName() + "]: " + s); } public AccessDeniedException(String s) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java index 65fc6172236d..e6c4822d0809 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/AuthMethod.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,15 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.yetus.audience.InterfaceAudience; /** Authentication method */ @InterfaceAudience.Private @@ -39,7 +36,7 @@ public enum AuthMethod { public final UserGroupInformation.AuthenticationMethod authenticationMethod; AuthMethod(byte code, String mechanismName, - UserGroupInformation.AuthenticationMethod authMethod) { + UserGroupInformation.AuthenticationMethod authMethod) { this.code = code; this.mechanismName = mechanismName; this.authenticationMethod = authMethod; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java index 97be44fff10d..31ed191f91a6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; /** * Unwrap messages with Crypto AES. 
Should be placed after a diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java index ceb3f35c0c75..40ce32073f8b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/CryptoAESWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; @@ -27,9 +29,6 @@ import org.apache.hbase.thirdparty.io.netty.channel.CoalescingBufferQueue; import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; - /** * wrap messages with Crypto AES. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java index 74ad96e2cbda..ed163dcc31ce 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/EncryptionUtil.java @@ -36,7 +36,9 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; + import org.apache.hadoop.hbase.shaded.protobuf.generated.EncryptionProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; @@ -55,8 +57,8 @@ private EncryptionUtil() { } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. * @param conf configuration * @param key the raw key bytes * @param algorithm the algorithm to use with this key material @@ -71,18 +73,16 @@ public static byte[] wrapKey(Configuration conf, byte[] key, String algorithm) } /** - * Protect a key by encrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. + * Protect a key by encrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. 
* @param conf configuration * @param subject subject key alias * @param key the key * @return the encrypted key bytes */ - public static byte[] wrapKey(Configuration conf, String subject, Key key) - throws IOException { + public static byte[] wrapKey(Configuration conf, String subject, Key key) throws IOException { // Wrap the key with the configured encryption algorithm. - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -98,11 +98,11 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) byte[] keyBytes = key.getEncoded(); builder.setLength(keyBytes.length); builder.setHashAlgorithm(Encryption.getConfiguredHashAlgorithm(conf)); - builder.setHash( - UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); + builder + .setHash(UnsafeByteOperations.unsafeWrap(Encryption.computeCryptoKeyHash(conf, keyBytes))); ByteArrayOutputStream out = new ByteArrayOutputStream(); - Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, - conf, cipher, iv); + Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject, conf, cipher, + iv); builder.setData(UnsafeByteOperations.unsafeWrap(out.toByteArray())); // Build and return the protobuf message out.reset(); @@ -111,8 +111,8 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) } /** - * Unwrap a key by decrypting it with the secret key of the given subject. - * The configuration must be set up correctly for key alias resolution. + * Unwrap a key by decrypting it with the secret key of the given subject. The configuration must + * be set up correctly for key alias resolution. 
* @param conf configuration * @param subject subject key alias * @param value the encrypted key bytes @@ -122,10 +122,9 @@ public static byte[] wrapKey(Configuration conf, String subject, Key key) */ public static Key unwrapKey(Configuration conf, String subject, byte[] value) throws IOException, KeyException { - EncryptionProtos.WrappedKey wrappedKey = EncryptionProtos.WrappedKey.PARSER - .parseDelimitedFrom(new ByteArrayInputStream(value)); - String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, - HConstants.CIPHER_AES); + EncryptionProtos.WrappedKey wrappedKey = + EncryptionProtos.WrappedKey.PARSER.parseDelimitedFrom(new ByteArrayInputStream(value)); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Cipher cipher = Encryption.getCipher(conf, algorithm); if (cipher == null) { throw new RuntimeException("Cipher '" + algorithm + "' not available"); @@ -137,22 +136,22 @@ private static Key getUnwrapKey(Configuration conf, String subject, EncryptionProtos.WrappedKey wrappedKey, Cipher cipher) throws IOException, KeyException { String configuredHashAlgorithm = Encryption.getConfiguredHashAlgorithm(conf); String wrappedHashAlgorithm = wrappedKey.getHashAlgorithm().trim(); - if(!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { + if (!configuredHashAlgorithm.equalsIgnoreCase(wrappedHashAlgorithm)) { String msg = String.format("Unexpected encryption key hash algorithm: %s (expecting: %s)", wrappedHashAlgorithm, configuredHashAlgorithm); - if(Encryption.failOnHashAlgorithmMismatch(conf)) { + if (Encryption.failOnHashAlgorithmMismatch(conf)) { throw new KeyException(msg); } LOG.debug(msg); } ByteArrayOutputStream out = new ByteArrayOutputStream(); byte[] iv = wrappedKey.hasIv() ? wrappedKey.getIv().toByteArray() : null; - Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), - wrappedKey.getLength(), subject, conf, cipher, iv); + Encryption.decryptWithSubjectKey(out, wrappedKey.getData().newInput(), wrappedKey.getLength(), + subject, conf, cipher, iv); byte[] keyBytes = out.toByteArray(); if (wrappedKey.hasHash()) { if (!Bytes.equals(wrappedKey.getHash().toByteArray(), - Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { + Encryption.hashWithAlg(wrappedHashAlgorithm, keyBytes))) { throw new KeyException("Key was not successfully unwrapped"); } } @@ -183,7 +182,6 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) /** * Helper to create an encyption context. - * * @param conf The current configuration. * @param family The current column descriptor. * @return The created encryption context. 
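The wrap/unwrap helpers touched in these hunks round-trip a data key through the subject's (master) key. A minimal sketch, assuming a key provider is configured so the "hbase" alias resolves; the alias and the throwaway all-zero AES key are illustrative only.

  import java.security.Key;
  import java.util.Arrays;
  import javax.crypto.spec.SecretKeySpec;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.security.EncryptionUtil;

  public class WrapUnwrapKey {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      Key dataKey = new SecretKeySpec(new byte[16], "AES"); // demo-only, all-zero key
      // Encrypts the data key with the subject's key; requires a key provider configured
      // for alias resolution (assumed here), otherwise wrapKey throws.
      byte[] wrapped = EncryptionUtil.wrapKey(conf, "hbase", dataKey);
      Key unwrapped = EncryptionUtil.unwrapKey(conf, "hbase", wrapped);
      System.out.println(Arrays.equals(dataKey.getEncoded(), unwrapped.getEncoded()));
    }
  }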
@@ -191,13 +189,13 @@ public static Key unwrapWALKey(Configuration conf, String subject, byte[] value) * @throws IllegalStateException in case of encryption related configuration errors */ public static Encryption.Context createEncryptionContext(Configuration conf, - ColumnFamilyDescriptor family) throws IOException { + ColumnFamilyDescriptor family) throws IOException { Encryption.Context cryptoContext = Encryption.Context.NONE; String cipherName = family.getEncryptionType(); if (cipherName != null) { - if(!Encryption.isEncryptionEnabled(conf)) { + if (!Encryption.isEncryptionEnabled(conf)) { throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); + + "' configured with type '" + cipherName + "' but the encryption feature is disabled"); } Cipher cipher; Key key; @@ -214,9 +212,9 @@ public static Encryption.Context createEncryptionContext(Configuration conf, // We use the encryption type specified in the column schema as a sanity check on // what the wrapped key is telling us if (!cipher.getName().equalsIgnoreCase(cipherName)) { - throw new IllegalStateException("Encryption for family '" + family.getNameAsString() - + "' configured with type '" + cipherName + "' but key specifies algorithm '" - + cipher.getName() + "'"); + throw new IllegalStateException( + "Encryption for family '" + family.getNameAsString() + "' configured with type '" + + cipherName + "' but key specifies algorithm '" + cipher.getName() + "'"); } } else { // Family does not provide key material, create a random key @@ -236,10 +234,7 @@ public static Encryption.Context createEncryptionContext(Configuration conf, /** * Helper for {@link #unwrapKey(Configuration, String, byte[])} which automatically uses the * configured master and alternative keys, rather than having to specify a key type to unwrap - * with. - * - * The configuration must be set up correctly for key alias resolution. - * + * with. The configuration must be set up correctly for key alias resolution. * @param conf the current configuration * @param keyBytes the key encrypted by master (or alternative) to unwrap * @return the key bytes, decrypted @@ -247,8 +242,8 @@ public static Encryption.Context createEncryptionContext(Configuration conf, */ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOException { Key key; - String masterKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, - User.getCurrent().getShortName()); + String masterKeyName = + conf.get(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, User.getCurrent().getShortName()); try { // First try the master key key = unwrapKey(conf, masterKeyName, keyBytes); @@ -258,8 +253,7 @@ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOExcept if (LOG.isDebugEnabled()) { LOG.debug("Unable to unwrap key with current master key '" + masterKeyName + "'"); } - String alternateKeyName = - conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); + String alternateKeyName = conf.get(HConstants.CRYPTO_MASTERKEY_ALTERNATE_NAME_CONF_KEY); if (alternateKeyName != null) { try { key = unwrapKey(conf, alternateKeyName, keyBytes); @@ -275,7 +269,6 @@ public static Key unwrapKey(Configuration conf, byte[] keyBytes) throws IOExcept /** * Helper to create an instance of CryptoAES. - * * @param conf The current configuration. * @param cryptoCipherMeta The metadata for create CryptoAES. * @return The instance of CryptoAES. 
@@ -286,13 +279,11 @@ public static CryptoAES createCryptoAES(RPCProtos.CryptoCipherMeta cryptoCipherM Properties properties = new Properties(); // the property for cipher class properties.setProperty(CryptoCipherFactory.CLASSES_KEY, - conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", - "org.apache.commons.crypto.cipher.JceCipher")); + conf.get("hbase.rpc.crypto.encryption.aes.cipher.class", + "org.apache.commons.crypto.cipher.JceCipher")); // create SaslAES for client return new CryptoAES(cryptoCipherMeta.getTransformation(), properties, - cryptoCipherMeta.getInKey().toByteArray(), - cryptoCipherMeta.getOutKey().toByteArray(), - cryptoCipherMeta.getInIv().toByteArray(), - cryptoCipherMeta.getOutIv().toByteArray()); + cryptoCipherMeta.getInKey().toByteArray(), cryptoCipherMeta.getOutKey().toByteArray(), + cryptoCipherMeta.getInIv().toByteArray(), cryptoCipherMeta.getOutIv().toByteArray()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java index 03af94ddad96..fde71630a1af 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/HBaseSaslRpcClient.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import java.io.BufferedInputStream; @@ -29,14 +28,11 @@ import java.io.OutputStream; import java.net.InetAddress; import java.nio.ByteBuffer; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; -import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.SaslInputStream; @@ -47,6 +43,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; + /** * A utility class that encapsulates SASL logic for RPC client. 
Copied from * org.apache.hadoop.security @@ -72,7 +70,7 @@ public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider p public HBaseSaslRpcClient(Configuration conf, SaslClientAuthenticationProvider provider, Token token, InetAddress serverAddr, SecurityInfo securityInfo, boolean fallbackAllowed, String rpcProtection, boolean initStreamForCrypto) - throws IOException { + throws IOException { super(conf, provider, token, serverAddr, securityInfo, fallbackAllowed, rpcProtection); this.initStreamForCrypto = initStreamForCrypto; } @@ -151,9 +149,8 @@ public boolean saslConnect(InputStream inS, OutputStream outS) throws IOExceptio try { readStatus(inStream); - } - catch (IOException e){ - if(e instanceof RemoteException){ + } catch (IOException e) { + if (e instanceof RemoteException) { LOG.debug("Sasl connection failed: ", e); throw e; } @@ -189,8 +186,8 @@ public String getSaslQOP() { return (String) saslClient.getNegotiatedProperty(Sasl.QOP); } - public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, - Configuration conf) throws IOException { + public void initCryptoCipher(RPCProtos.CryptoCipherMeta cryptoCipherMeta, Configuration conf) + throws IOException { // create SaslAES for client cryptoAES = EncryptionUtil.createCryptoAES(cryptoCipherMeta, conf); cryptoAesEnable = true; @@ -214,6 +211,7 @@ public InputStream getInputStream() throws IOException { class WrappedInputStream extends FilterInputStream { private ByteBuffer unwrappedRpcBuffer = ByteBuffer.allocate(0); + public WrappedInputStream(InputStream in) throws IOException { super(in); } @@ -279,6 +277,7 @@ class WrappedOutputStream extends FilterOutputStream { public WrappedOutputStream(OutputStream out) throws IOException { super(out); } + @Override public void write(byte[] buf, int off, int len) throws IOException { if (LOG.isDebugEnabled()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java index e4611d181378..8c67e851ce65 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseRpcConnectionHeaderHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; -import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.crypto.aes.CryptoAES; + import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos; /** @@ -43,7 +44,7 @@ public class NettyHBaseRpcConnectionHeaderHandler extends SimpleChannelInboundHa private final ByteBuf connectionHeaderWithLength; public NettyHBaseRpcConnectionHeaderHandler(Promise saslPromise, Configuration conf, - ByteBuf connectionHeaderWithLength) { + ByteBuf connectionHeaderWithLength) { this.saslPromise = saslPromise; this.conf = conf; this.connectionHeaderWithLength = connectionHeaderWithLength; @@ -61,8 +62,8 @@ protected void channelRead0(ChannelHandlerContext ctx, ByteBuf msg) throws Excep // Get the CryptoCipherMeta, update the HBaseSaslRpcClient for Crypto Cipher if (connectionHeaderResponse.hasCryptoCipherMeta()) { - CryptoAES cryptoAES = EncryptionUtil.createCryptoAES( - connectionHeaderResponse.getCryptoCipherMeta(), conf); + CryptoAES cryptoAES = + EncryptionUtil.createCryptoAES(connectionHeaderResponse.getCryptoCipherMeta(), conf); // replace the Sasl handler with Crypto AES handler setupCryptoAESHandler(ctx.pipeline(), cryptoAES); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java index a5b980350d15..a5293f2c1aff 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,9 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; -import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; - import java.io.IOException; import java.net.InetAddress; - import javax.security.sasl.Sasl; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.token.Token; @@ -33,6 +28,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline; +import org.apache.hbase.thirdparty.io.netty.handler.codec.LengthFieldBasedFrameDecoder; + /** * Implement SASL logic for netty rpc client. 
* @since 2.0.0 diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java index e011cc612e54..28eff1785543 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/NettyHBaseSaslRpcClientHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,25 +17,24 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; - import java.io.IOException; import java.net.InetAddress; import java.security.PrivilegedExceptionAction; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.hadoop.hbase.ipc.FallbackDisallowedException; import org.apache.hadoop.hbase.security.provider.SaslClientAuthenticationProvider; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; +import org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise; /** * Implement SASL logic for netty rpc client. @@ -69,8 +68,8 @@ public NettyHBaseSaslRpcClientHandler(Promise saslPromise, UserGroupInf this.ugi = ugi; this.conf = conf; this.saslRpcClient = new NettyHBaseSaslRpcClient(conf, provider, token, serverAddr, - securityInfo, fallbackAllowed, conf.get( - "hbase.rpc.protection", SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); + securityInfo, fallbackAllowed, conf.get("hbase.rpc.protection", + SaslUtil.QualityOfProtection.AUTHENTICATION.name().toLowerCase())); } private void writeResponse(ChannelHandlerContext ctx, byte[] response) { @@ -91,10 +90,10 @@ private void tryComplete(ChannelHandlerContext ctx) { } private void setCryptoAESOption() { - boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. 
- getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); - needProcessConnectionHeader = saslEncryptionEnabled && conf.getBoolean( - "hbase.rpc.crypto.encryption.aes.enabled", false); + boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY.getSaslQop() + .equalsIgnoreCase(saslRpcClient.getSaslQOP()); + needProcessConnectionHeader = + saslEncryptionEnabled && conf.getBoolean("hbase.rpc.crypto.encryption.aes.enabled", false); } public boolean isNeedProcessConnectionHeader() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java index cbbcb0e77616..952550ef4c8a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslChallengeDecoder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,16 +17,15 @@ */ package org.apache.hadoop.hbase.security; -import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; -import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; -import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; - import java.io.IOException; import java.util.List; - import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; +import org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder; /** * Decode the sasl challenge sent by RpcServer. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java index 332bc1933d6e..90012c8bb1b6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslStatus.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public enum SaslStatus { - SUCCESS (0), - ERROR (1); + SUCCESS(0), ERROR(1); public final int state; + SaslStatus(int state) { this.state = state; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java index 00d0c41240ac..dfc36e4ba314 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUnwrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,15 +17,14 @@ */ package org.apache.hadoop.hbase.security; +import javax.security.sasl.SaslClient; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf; import org.apache.hbase.thirdparty.io.netty.buffer.Unpooled; import org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext; import org.apache.hbase.thirdparty.io.netty.channel.SimpleChannelInboundHandler; -import javax.security.sasl.SaslClient; - -import org.apache.yetus.audience.InterfaceAudience; - /** * Unwrap sasl messages. Should be placed after a * io.netty.handler.codec.LengthFieldBasedFrameDecoder diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java index ad2067f2cf22..e2c77845df92 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +20,10 @@ import java.util.Base64; import java.util.Map; import java.util.TreeMap; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; import javax.security.sasl.SaslException; import javax.security.sasl.SaslServer; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -39,9 +36,7 @@ public class SaslUtil { public static final int SWITCH_TO_SIMPLE_AUTH = -88; public enum QualityOfProtection { - AUTHENTICATION("auth"), - INTEGRITY("auth-int"), - PRIVACY("auth-conf"); + AUTHENTICATION("auth"), INTEGRITY("auth-int"), PRIVACY("auth-conf"); private final String saslQop; @@ -81,8 +76,8 @@ public static char[] encodePassword(byte[] password) { } /** - * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} - * corresponding to the given {@code stringQop} value. + * Returns {@link org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection} corresponding to + * the given {@code stringQop} value. * @throws IllegalArgumentException If stringQop doesn't match any QOP. */ public static QualityOfProtection getQop(String stringQop) { @@ -91,7 +86,7 @@ public static QualityOfProtection getQop(String stringQop) { return qop; } } - throw new IllegalArgumentException("Invalid qop: " + stringQop + throw new IllegalArgumentException("Invalid qop: " + stringQop + ". 
It must be one of 'authentication', 'integrity', 'privacy'."); } @@ -110,7 +105,7 @@ public static Map initSaslProperties(String rpcProtection) { QualityOfProtection qop = getQop(qops[i]); saslQopBuilder.append(",").append(qop.getSaslQop()); } - saslQop = saslQopBuilder.substring(1); // remove first ',' + saslQop = saslQopBuilder.substring(1); // remove first ',' } Map saslProps = new TreeMap<>(); saslProps.put(Sasl.QOP, saslQop); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java index 62c127e2dfb3..006a24f0fd07 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslWrapHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security; import javax.security.sasl.SaslClient; - import org.apache.hadoop.hbase.exceptions.ConnectionClosedException; import org.apache.yetus.audience.InterfaceAudience; @@ -31,7 +30,6 @@ import org.apache.hbase.thirdparty.io.netty.util.ReferenceCountUtil; import org.apache.hbase.thirdparty.io.netty.util.concurrent.PromiseCombiner; - /** * wrap sasl messages. */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java index a2f086fe3873..b5f21e6fc3ca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SecurityInfo.java @@ -35,30 +35,28 @@ @InterfaceAudience.Private public class SecurityInfo { /** Maps RPC service names to authentication information */ - private static ConcurrentMap infos = new ConcurrentHashMap<>(); + private static ConcurrentMap infos = new ConcurrentHashMap<>(); // populate info for known services static { infos.put(AdminProtos.AdminService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(ClientProtos.ClientService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, - Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.REGIONSERVER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegionServerStatusProtos.RegionServerStatusService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(MasterProtos.HbckService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); infos.put(RegistryProtos.ClientMetaService.getDescriptor().getName(), - new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, Kind.HBASE_AUTH_TOKEN)); + new SecurityInfo(SecurityConstants.MASTER_KRB_PRINCIPAL, 
Kind.HBASE_AUTH_TOKEN)); // NOTE: IF ADDING A NEW SERVICE, BE SURE TO UPDATE HBasePolicyProvider ALSO ELSE // new Service will not be found when all is Kerberized!!!! } /** - * Adds a security configuration for a new service name. Note that this will have no effect if - * the service name was already registered. + * Adds a security configuration for a new service name. Note that this will have no effect if the + * service name was already registered. */ public static void addInfo(String serviceName, SecurityInfo securityInfo) { infos.putIfAbsent(serviceName, securityInfo); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java index f56e8e21234b..ef7825d6fdf2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,23 +21,22 @@ import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.client.TableDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.client.security.SecurityCapability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService.BlockingInterface; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility client for doing access control admin operations. 
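The AccessControlClient hunks that follow only re-wrap method signatures and javadoc, so for context here is an illustrative sketch of the utility in use (made-up table, user and column family names; a cluster with the AccessController coprocessor enabled is assumed; both calls declare throws Throwable, as the revoke hunk below shows):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AclRoundTripSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          TableName table = TableName.valueOf("demo_table");
          // Grant READ/WRITE on family 'cf' to a made-up user ...
          AccessControlClient.grant(conn, table, "example_user", Bytes.toBytes("cf"), null,
            Permission.Action.READ, Permission.Action.WRITE);
          // ... then revoke the same permissions again.
          AccessControlClient.revoke(conn, table, "example_user", Bytes.toBytes("cf"), null,
            Permission.Action.READ, Permission.Action.WRITE);
        }
      }
    }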
@@ -69,11 +68,9 @@ public static boolean isCellAuthorizationEnabled(Connection connection) throws I .contains(SecurityCapability.CELL_AUTHORIZATION); } - private static BlockingInterface getAccessControlServiceStub(Table ht) - throws IOException { + private static BlockingInterface getAccessControlServiceStub(Table ht) throws IOException { CoprocessorRpcChannel service = ht.coprocessorService(HConstants.EMPTY_START_ROW); - BlockingInterface protocol = - AccessControlProtos.AccessControlService.newBlockingStub(service); + BlockingInterface protocol = AccessControlProtos.AccessControlService.newBlockingStub(service); return protocol; } @@ -90,8 +87,8 @@ private static BlockingInterface getAccessControlServiceStub(Table ht) * @param actions * @throws Throwable */ - private static void grant(Connection connection, final TableName tableName, - final String userName, final byte[] family, final byte[] qual, boolean mergeExistingPermissions, + private static void grant(Connection connection, final TableName tableName, final String userName, + final byte[] family, final byte[] qual, boolean mergeExistingPermissions, final Permission.Action... actions) throws Throwable { connection.getAdmin().grant(new UserPermission(userName, Permission.newBuilder(tableName) .withFamily(family).withQualifier(qual).withActions(actions).build()), @@ -99,8 +96,8 @@ private static void grant(Connection connection, final TableName tableName, } /** - * Grants permission on the specified table for the specified user. - * If permissions for a specified user exists, later granted permissions will override previous granted permissions. + * Grants permission on the specified table for the specified user. If permissions for a specified + * user exists, later granted permissions will override previous granted permissions. * @param connection The Connection instance to use * @param tableName * @param userName @@ -133,8 +130,8 @@ private static void grant(Connection connection, final String namespace, final S } /** - * Grants permission on the specified namespace for the specified user. - * If permissions on the specified namespace exists, later granted permissions will override previous granted + * Grants permission on the specified namespace for the specified user. If permissions on the + * specified namespace exists, later granted permissions will override previous granted * permissions. * @param connection The Connection instance to use * @param namespace @@ -165,9 +162,8 @@ private static void grant(Connection connection, final String userName, } /** - * Grant global permissions for the specified user. - * If permissions for the specified user exists, later granted permissions will override previous granted - * permissions. + * Grant global permissions for the specified user. If permissions for the specified user exists, + * later granted permissions will override previous granted permissions. * @param connection * @param userName * @param actions @@ -195,9 +191,9 @@ public static boolean isAccessControllerRunning(Connection connection) * @param actions * @throws Throwable */ - public static void revoke(Connection connection, final TableName tableName, - final String username, final byte[] family, final byte[] qualifier, - final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final TableName tableName, final String username, + final byte[] family, final byte[] qualifier, final Permission.Action... 
actions) + throws Throwable { connection.getAdmin().revoke(new UserPermission(username, Permission.newBuilder(tableName) .withFamily(family).withQualifier(qualifier).withActions(actions).build())); } @@ -210,8 +206,8 @@ public static void revoke(Connection connection, final TableName tableName, * @param actions * @throws Throwable */ - public static void revoke(Connection connection, final String namespace, - final String userName, final Permission.Action... actions) throws Throwable { + public static void revoke(Connection connection, final String namespace, final String userName, + final Permission.Action... actions) throws Throwable { connection.getAdmin().revoke( new UserPermission(userName, Permission.newBuilder(namespace).withActions(actions).build())); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java index e0c4d99dfca5..a795d296fe7c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlConstants.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; @@ -24,16 +23,16 @@ public interface AccessControlConstants { /** - * Configuration option that toggles whether EXEC permission checking is - * performed during coprocessor endpoint invocations. + * Configuration option that toggles whether EXEC permission checking is performed during + * coprocessor endpoint invocations. */ public static final String EXEC_PERMISSION_CHECKS_KEY = "hbase.security.exec.permission.checks"; /** Default setting for hbase.security.exec.permission.checks; false */ public static final boolean DEFAULT_EXEC_PERMISSION_CHECKS = false; /** - * Configuration or CF schema option for early termination of access checks - * if table or CF permissions grant access. Pre-0.98 compatible behavior + * Configuration or CF schema option for early termination of access checks if table or CF + * permissions grant access. Pre-0.98 compatible behavior */ public static final String CF_ATTRIBUTE_EARLY_OUT = "hbase.security.access.early_out"; /** Default setting for hbase.security.access.early_out */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java index 484cee3cb511..ca9de19e1488 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/AccessControlUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -17,39 +17,39 @@ */ package org.apache.hadoop.hbase.security.access; +import com.google.protobuf.ByteString; +import com.google.protobuf.RpcController; +import com.google.protobuf.ServiceException; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.Map; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Admin; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -import com.google.protobuf.ByteString; -import com.google.protobuf.RpcController; -import com.google.protobuf.ServiceException; /** * @since 2.0.0 */ @InterfaceAudience.Private public class AccessControlUtil { - private AccessControlUtil() {} + private AccessControlUtil() { + } /** * Create a request to grant user table permissions. - * * @param username the short user name who to grant permissions * @param tableName optional table name the permissions apply * @param family optional column family @@ -57,11 +57,10 @@ private AccessControlUtil() {} * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, - boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, boolean mergeExistingPermissions, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -78,29 +77,24 @@ public static AccessControlProtos.GrantRequest buildGrantRequest( if (qualifier != null) { permissionBuilder.setQualifier(ByteStringer.wrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to grant user namespace permissions. - * * @param username the short user name who to grant permissions * @param namespace optional table name the permissions apply * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ - public static AccessControlProtos.GrantRequest buildGrantRequest( - String username, String namespace, boolean mergeExistingPermissions, + public static AccessControlProtos.GrantRequest buildGrantRequest(String username, + String namespace, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -110,54 +104,44 @@ public static AccessControlProtos.GrantRequest buildGrantRequest( permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } /** * Create a request to revoke user global permissions. - * * @param username the short user name whose permissions to be revoked * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + AccessControlProtos.Permission.Action... 
actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to revoke user namespace permissions. - * * @param username the short user name whose permissions to be revoked * @param namespace optional table name the permissions apply * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, String namespace, - AccessControlProtos.Permission.Action... actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + String namespace, AccessControlProtos.Permission.Action... actions) { + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.NamespacePermission.Builder permissionBuilder = AccessControlProtos.NamespacePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -167,60 +151,51 @@ public static AccessControlProtos.RevokeRequest buildRevokeRequest( permissionBuilder.setNamespaceName(ByteString.copyFromUtf8(namespace)); } ret.setType(AccessControlProtos.Permission.Type.Namespace) - .setNamespacePermission(permissionBuilder); + .setNamespacePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } /** * Create a request to grant user global permissions. - * * @param username the short user name who to grant permissions * @param actions the permissions to be granted * @return A {@link AccessControlProtos} GrantRequest */ public static AccessControlProtos.GrantRequest buildGrantRequest(String username, boolean mergeExistingPermissions, AccessControlProtos.Permission.Action... 
actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.GlobalPermission.Builder permissionBuilder = AccessControlProtos.GlobalPermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { permissionBuilder.addAction(a); } - ret.setType(AccessControlProtos.Permission.Type.Global) - .setGlobalPermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Global).setGlobalPermission(permissionBuilder); return AccessControlProtos.GrantRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).setMergeExistingPermissions(mergeExistingPermissions).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .setMergeExistingPermissions(mergeExistingPermissions).build(); } public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions(String user, Permission perms) { return AccessControlProtos.UsersAndPermissions.newBuilder() .addUserPermissions(AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder() - .setUser(ByteString.copyFromUtf8(user)) - .addPermissions(toPermission(perms)) - .build()) + .setUser(ByteString.copyFromUtf8(user)).addPermissions(toPermission(perms)).build()) .build(); } - public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( - ListMultimap perms) { + public static AccessControlProtos.UsersAndPermissions + toUsersAndPermissions(ListMultimap perms) { AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perms.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (Permission perm: entry.getValue()) { + for (Permission perm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(perm)); } builder.addUserPermissions(userPermBuilder.build()); @@ -228,13 +203,13 @@ public static AccessControlProtos.UsersAndPermissions toUsersAndPermissions( return builder.build(); } - public static ListMultimap toUsersAndPermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUsersAndPermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap result = ArrayListMultimap.create(); - for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms: - proto.getUserPermissionsList()) { + for (AccessControlProtos.UsersAndPermissions.UserPermissions userPerms : proto + .getUserPermissionsList()) { String user = userPerms.getUser().toStringUtf8(); - for (AccessControlProtos.Permission perm: userPerms.getPermissionsList()) { + for (AccessControlProtos.Permission perm : userPerms.getPermissionsList()) { result.put(user, toPermission(perm)); } } @@ -308,7 +283,6 @@ public static Permission toPermission(AccessControlProtos.Permission proto) { /** * Convert a client Permission to a Permission proto - * * @param perm the client Permission * @return the protobuf Permission */ @@ -318,7 +292,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { NamespacePermission namespace = (NamespacePermission) perm; 
ret.setType(AccessControlProtos.Permission.Type.Namespace); AccessControlProtos.NamespacePermission.Builder builder = - AccessControlProtos.NamespacePermission.newBuilder(); + AccessControlProtos.NamespacePermission.newBuilder(); builder.setNamespaceName(ByteString.copyFromUtf8(namespace.getNamespace())); Permission.Action[] actions = perm.getActions(); if (actions != null) { @@ -331,7 +305,7 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { TablePermission table = (TablePermission) perm; ret.setType(AccessControlProtos.Permission.Type.Table); AccessControlProtos.TablePermission.Builder builder = - AccessControlProtos.TablePermission.newBuilder(); + AccessControlProtos.TablePermission.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(table.getTableName())); if (table.hasFamily()) { builder.setFamily(ByteStringer.wrap(table.getFamily())); @@ -350,10 +324,10 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { // perm instanceof GlobalPermission ret.setType(AccessControlProtos.Permission.Type.Global); AccessControlProtos.GlobalPermission.Builder builder = - AccessControlProtos.GlobalPermission.newBuilder(); + AccessControlProtos.GlobalPermission.newBuilder(); Permission.Action[] actions = perm.getActions(); if (actions != null) { - for (Permission.Action a: actions) { + for (Permission.Action a : actions) { builder.addAction(toPermissionAction(a)); } } @@ -364,7 +338,6 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { /** * Converts a list of Permission.Action proto to an array of client Permission.Action objects. - * * @param protoActions the list of protobuf Actions * @return the converted array of Actions */ @@ -379,68 +352,62 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { /** * Converts a Permission.Action proto to a client Permission.Action object. 
- * * @param action the protobuf Action * @return the converted Action */ - public static Permission.Action toPermissionAction( - AccessControlProtos.Permission.Action action) { + public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client Permission.Action to a Permission.Action proto - * * @param action the client Action * @return the protobuf Action */ - public static AccessControlProtos.Permission.Action toPermissionAction( - Permission.Action action) { + public static AccessControlProtos.Permission.Action toPermissionAction(Permission.Action action) { switch (action) { - case READ: - return AccessControlProtos.Permission.Action.READ; - case WRITE: - return AccessControlProtos.Permission.Action.WRITE; - case EXEC: - return AccessControlProtos.Permission.Action.EXEC; - case CREATE: - return AccessControlProtos.Permission.Action.CREATE; - case ADMIN: - return AccessControlProtos.Permission.Action.ADMIN; + case READ: + return AccessControlProtos.Permission.Action.READ; + case WRITE: + return AccessControlProtos.Permission.Action.WRITE; + case EXEC: + return AccessControlProtos.Permission.Action.EXEC; + case CREATE: + return AccessControlProtos.Permission.Action.CREATE; + case ADMIN: + return AccessControlProtos.Permission.Action.ADMIN; } - throw new IllegalArgumentException("Unknown action value "+action.name()); + throw new IllegalArgumentException("Unknown action value " + action.name()); } /** * Convert a client user permission to a user permission proto - * * @param perm the client UserPermission * @return the protobuf UserPermission */ public static AccessControlProtos.UserPermission toUserPermission(UserPermission perm) { return AccessControlProtos.UserPermission.newBuilder() .setUser(ByteString.copyFromUtf8(perm.getUser())) - .setPermission(toPermission(perm.getPermission())) - .build(); + .setPermission(toPermission(perm.getPermission())).build(); } /** * Converts the permissions list into a protocol buffer GetUserPermissionsResponse */ - public static GetUserPermissionsResponse buildGetUserPermissionsResponse( - final List permissions) { + public static GetUserPermissionsResponse + buildGetUserPermissionsResponse(final List permissions) { GetUserPermissionsResponse.Builder builder = GetUserPermissionsResponse.newBuilder(); for (UserPermission perm : permissions) { builder.addUserPermission(toUserPermission(perm)); @@ -450,7 +417,6 @@ public static GetUserPermissionsResponse buildGetUserPermissionsResponse( /** * Converts a user permission proto to a client user permission object. 
- * * @param proto the protobuf UserPermission * @return the converted UserPermission */ @@ -459,21 +425,20 @@ public static UserPermission toUserPermission(AccessControlProtos.UserPermission } /** - * Convert a ListMultimap<String, TablePermission> where key is username - * to a protobuf UserPermission - * + * Convert a ListMultimap<String, TablePermission> where key is username to a protobuf + * UserPermission * @param perm the list of user and table permissions * @return the protobuf UserTablePermissions */ - public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( - ListMultimap perm) { + public static AccessControlProtos.UsersAndPermissions + toUserTablePermissions(ListMultimap perm) { AccessControlProtos.UsersAndPermissions.Builder builder = AccessControlProtos.UsersAndPermissions.newBuilder(); for (Map.Entry> entry : perm.asMap().entrySet()) { AccessControlProtos.UsersAndPermissions.UserPermissions.Builder userPermBuilder = AccessControlProtos.UsersAndPermissions.UserPermissions.newBuilder(); userPermBuilder.setUser(ByteString.copyFromUtf8(entry.getKey())); - for (UserPermission userPerm: entry.getValue()) { + for (UserPermission userPerm : entry.getValue()) { userPermBuilder.addPermissions(toPermission(userPerm.getPermission())); } builder.addUserPermissions(userPermBuilder.build()); @@ -485,7 +450,6 @@ public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( * A utility used to grant a user global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions * @param actions the permissions to be granted @@ -494,24 +458,24 @@ public static AccessControlProtos.UsersAndPermissions toUserTablePermissions( */ @Deprecated public static void grant(RpcController controller, - AccessControlService.BlockingInterface protocol, String userShortName, boolean mergeExistingPermissions, - Permission.Action... actions) throws ServiceException { + AccessControlService.BlockingInterface protocol, String userShortName, + boolean mergeExistingPermissions, Permission.Action... actions) throws ServiceException { List permActions = Lists.newArrayListWithCapacity(actions.length); for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, mergeExistingPermissions, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, mergeExistingPermissions, + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } /** - * A utility used to grant a user table permissions. The permissions will - * be for a table table/column family/qualifier. + * A utility used to grant a user table permissions. The permissions will be for a table + * table/column family/qualifier. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to grant permissions * @param tableName optional table name @@ -541,7 +505,6 @@ public static void grant(RpcController controller, * A utility used to grant a user namespace permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param namespace the short name of the user to grant permissions @@ -558,8 +521,9 @@ public static void grant(RpcController controller, for (Permission.Action a : actions) { permActions.add(toPermissionAction(a)); } - AccessControlProtos.GrantRequest request = buildGrantRequest(userShortName, namespace, mergeExistingPermissions, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + AccessControlProtos.GrantRequest request = + buildGrantRequest(userShortName, namespace, mergeExistingPermissions, + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.grant(controller, request); } @@ -567,7 +531,6 @@ public static void grant(RpcController controller, * A utility used to revoke a user's global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions @@ -585,16 +548,15 @@ public static void revoke(RpcController controller, permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } /** - * A utility used to revoke a user's table permissions. The permissions will - * be for a table/column family/qualifier. + * A utility used to revoke a user's table permissions. The permissions will be for a table/column + * family/qualifier. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions @@ -615,7 +577,7 @@ public static void revoke(RpcController controller, permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, tableName, f, q, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -623,7 +585,6 @@ public static void revoke(RpcController controller, * A utility used to revoke a user's namespace permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param userShortName the short name of the user to revoke permissions @@ -642,7 +603,7 @@ public static void revoke(RpcController controller, permActions.add(toPermissionAction(a)); } AccessControlProtos.RevokeRequest request = buildRevokeRequest(userShortName, namespace, - permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); + permActions.toArray(new AccessControlProtos.Permission.Action[actions.length])); protocol.revoke(controller, request); } @@ -650,7 +611,6 @@ public static void revoke(RpcController controller, * A utility used to get user's global permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @throws ServiceException on failure @@ -694,7 +654,6 @@ public static List getUserPermissions(RpcController controller, * A utility used to get user table permissions. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param t optional table name @@ -703,8 +662,7 @@ public static List getUserPermissions(RpcController controller, */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - TableName t) throws ServiceException { + AccessControlService.BlockingInterface protocol, TableName t) throws ServiceException { return getUserPermissions(controller, protocol, t, null, null, HConstants.EMPTY_STRING); } @@ -754,7 +712,6 @@ public static List getUserPermissions(RpcController controller, * A utility used to get permissions for selected namespace. *
<p>
    * It's also called by the shell, in case you want to find references. - * * @param controller RpcController * @param protocol the AccessControlService protocol proxy * @param namespace name of the namespace @@ -763,8 +720,7 @@ public static List getUserPermissions(RpcController controller, */ @Deprecated public static List getUserPermissions(RpcController controller, - AccessControlService.BlockingInterface protocol, - byte[] namespace) throws ServiceException { + AccessControlService.BlockingInterface protocol, byte[] namespace) throws ServiceException { return getUserPermissions(controller, protocol, namespace, HConstants.EMPTY_STRING); } @@ -848,8 +804,8 @@ public static boolean hasPermission(RpcController controller, * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toUserPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap userPermission = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -868,8 +824,8 @@ public static ListMultimap toUserPermission( * @param proto the proto UsersAndPermissions * @return a ListMultimap with user and its permissions */ - public static ListMultimap toPermission( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toPermission(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { @@ -884,7 +840,6 @@ public static ListMultimap toPermission( /** * Create a request to revoke user table permissions. - * * @param username the short user name whose permissions to be revoked * @param tableName optional table name the permissions apply * @param family optional column family @@ -892,11 +847,10 @@ public static ListMultimap toPermission( * @param actions the permissions to be revoked * @return A {@link AccessControlProtos} RevokeRequest */ - public static AccessControlProtos.RevokeRequest buildRevokeRequest( - String username, TableName tableName, byte[] family, byte[] qualifier, + public static AccessControlProtos.RevokeRequest buildRevokeRequest(String username, + TableName tableName, byte[] family, byte[] qualifier, AccessControlProtos.Permission.Action... 
actions) { - AccessControlProtos.Permission.Builder ret = - AccessControlProtos.Permission.newBuilder(); + AccessControlProtos.Permission.Builder ret = AccessControlProtos.Permission.newBuilder(); AccessControlProtos.TablePermission.Builder permissionBuilder = AccessControlProtos.TablePermission.newBuilder(); for (AccessControlProtos.Permission.Action a : actions) { @@ -911,13 +865,10 @@ public static AccessControlProtos.RevokeRequest buildRevokeRequest( if (qualifier != null) { permissionBuilder.setQualifier(ByteStringer.wrap(qualifier)); } - ret.setType(AccessControlProtos.Permission.Type.Table) - .setTablePermission(permissionBuilder); + ret.setType(AccessControlProtos.Permission.Type.Table).setTablePermission(permissionBuilder); return AccessControlProtos.RevokeRequest.newBuilder() - .setUserPermission( - AccessControlProtos.UserPermission.newBuilder() - .setUser(ByteString.copyFromUtf8(username)) - .setPermission(ret) - ).build(); + .setUserPermission(AccessControlProtos.UserPermission.newBuilder() + .setUser(ByteString.copyFromUtf8(username)).setPermission(ret)) + .build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java index 8e1767cce944..cb45087e1018 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GetUserPermissionsRequest.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java index 01d53ebb37f7..570c543b4b53 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/GlobalPermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.security.access; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java index 7781d2295693..721530101835 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/NamespacePermission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.util.Objects; - import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java index 49f2432ffa58..834641194596 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; @@ -27,20 +26,17 @@ import java.util.List; import java.util.Map; import java.util.Objects; - import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.io.VersionedWritable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.io.VersionedWritable; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; /** - * Base permissions instance representing the ability to perform a given set - * of actions. - * + * Base permissions instance representing the ability to perform a given set of actions. 
* @see TablePermission */ @InterfaceAudience.Public @@ -52,11 +48,14 @@ public enum Action { READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); private final byte code; + Action(char code) { this.code = (byte) code; } - public byte code() { return code; } + public byte code() { + return code; + } } @InterfaceAudience.Private @@ -64,6 +63,7 @@ protected enum Scope { GLOBAL('G'), NAMESPACE('N'), TABLE('T'), EMPTY('E'); private final byte code; + Scope(char code) { this.code = (byte) code; } @@ -82,23 +82,15 @@ public byte code() { protected Scope scope = Scope.EMPTY; static { - ACTION_BY_CODE = ImmutableMap.of( - Action.READ.code, Action.READ, - Action.WRITE.code, Action.WRITE, - Action.EXEC.code, Action.EXEC, - Action.CREATE.code, Action.CREATE, - Action.ADMIN.code, Action.ADMIN - ); - - SCOPE_BY_CODE = ImmutableMap.of( - Scope.GLOBAL.code, Scope.GLOBAL, - Scope.NAMESPACE.code, Scope.NAMESPACE, - Scope.TABLE.code, Scope.TABLE, - Scope.EMPTY.code, Scope.EMPTY - ); + ACTION_BY_CODE = ImmutableMap.of(Action.READ.code, Action.READ, Action.WRITE.code, Action.WRITE, + Action.EXEC.code, Action.EXEC, Action.CREATE.code, Action.CREATE, Action.ADMIN.code, + Action.ADMIN); + + SCOPE_BY_CODE = ImmutableMap.of(Scope.GLOBAL.code, Scope.GLOBAL, Scope.NAMESPACE.code, + Scope.NAMESPACE, Scope.TABLE.code, Scope.TABLE, Scope.EMPTY.code, Scope.EMPTY); } - /** Empty constructor for Writable implementation. Do not use. */ + /** Empty constructor for Writable implementation. Do not use. */ public Permission() { super(); } @@ -114,8 +106,8 @@ public Permission(byte[] actionCodes) { for (byte code : actionCodes) { Action action = ACTION_BY_CODE.get(code); if (action == null) { - LOG.error("Ignoring unknown action code '" + - Bytes.toStringBinary(new byte[] { code }) + "'"); + LOG.error( + "Ignoring unknown action code '" + Bytes.toStringBinary(new byte[] { code }) + "'"); continue; } actions.add(action); @@ -146,9 +138,8 @@ public void setActions(Action[] assigned) { } /** - * Check if two permission equals regardless of actions. It is useful when - * merging a new permission with an existed permission which needs to check two permissions's - * fields. + * Check if two permission equals regardless of actions. It is useful when merging a new + * permission with an existed permission which needs to check two permissions's fields. * @param obj instance * @return true if equals, false otherwise */ @@ -221,8 +212,8 @@ public void readFields(DataInput in) throws IOException { byte b = in.readByte(); Action action = ACTION_BY_CODE.get(b); if (action == null) { - throw new IOException("Unknown action code '" + - Bytes.toStringBinary(new byte[] { b }) + "' in input"); + throw new IOException( + "Unknown action code '" + Bytes.toStringBinary(new byte[] { b }) + "' in input"); } actions.add(action); } @@ -235,7 +226,7 @@ public void write(DataOutput out) throws IOException { super.write(out); out.writeByte(actions != null ? 
actions.size() : 0); if (actions != null) { - for (Action a: actions) { + for (Action a : actions) { out.writeByte(a.code()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java index 661bcc842a8d..38e4167d81b9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/ShadedAccessControlUtil.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Collection; @@ -28,6 +27,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hbase.thirdparty.com.google.common.collect.ListMultimap; import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GetUserPermissionsResponse; import org.apache.hadoop.hbase.shaded.protobuf.generated.AccessControlProtos.GrantRequest; @@ -39,7 +39,6 @@ /** * Convert protobuf objects in AccessControl.proto under hbase-protocol-shaded to user-oriented * objects and vice versa.
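As context for the ACTION_BY_CODE / SCOPE_BY_CODE maps and the byte[] constructor reformatted above, the single-character action codes can be exercised as in this minimal sketch; Permission.implies(Action) is assumed from the public Permission API and is not part of this hunk:

```java
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.util.Bytes;

public class PermissionCodeSketch {
  public static void main(String[] args) {
    // Each Action is stored as a single byte code: R, W, X, C, A.
    for (Permission.Action a : Permission.Action.values()) {
      System.out.println(a + " -> " + (char) a.code());
    }
    // The byte[] constructor decodes such codes via ACTION_BY_CODE; unknown codes
    // are logged and skipped rather than failing.
    Permission readWrite = new Permission(Bytes.toBytes("RW"));
    // implies(Action) is assumed from the public Permission API (not shown in this hunk).
    System.out.println(readWrite.implies(Permission.Action.READ));  // expected: true
    System.out.println(readWrite.implies(Permission.Action.ADMIN)); // expected: false
  }
}
```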
    - * * In HBASE-15638, we create a hbase-protocol-shaded module for upgrading protobuf version to 3.x, * but there are still some coprocessor endpoints(such as AccessControl, Authentication, * MulitRowMutation) which depend on hbase-protocol module for CPEP compatibility. In fact, we use @@ -73,16 +72,16 @@ public static AccessControlProtos.Permission.Action toPermissionAction(Permissio */ public static Permission.Action toPermissionAction(AccessControlProtos.Permission.Action action) { switch (action) { - case READ: - return Permission.Action.READ; - case WRITE: - return Permission.Action.WRITE; - case EXEC: - return Permission.Action.EXEC; - case CREATE: - return Permission.Action.CREATE; - case ADMIN: - return Permission.Action.ADMIN; + case READ: + return Permission.Action.READ; + case WRITE: + return Permission.Action.WRITE; + case EXEC: + return Permission.Action.EXEC; + case CREATE: + return Permission.Action.CREATE; + case ADMIN: + return Permission.Action.ADMIN; } throw new IllegalArgumentException("Unknown action value " + action.name()); } @@ -218,8 +217,8 @@ public static AccessControlProtos.Permission toPermission(Permission perm) { * @param proto the protobuf UserPermission * @return the converted UserPermission */ - public static ListMultimap toUserTablePermissions( - AccessControlProtos.UsersAndPermissions proto) { + public static ListMultimap + toUserTablePermissions(AccessControlProtos.UsersAndPermissions proto) { ListMultimap perms = ArrayListMultimap.create(); AccessControlProtos.UsersAndPermissions.UserPermissions userPerm; for (int i = 0; i < proto.getUserPermissionsCount(); i++) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java index f17919f70bf9..b78728ce9013 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/TablePermission.java @@ -15,24 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Represents an authorization for access for the given actions, optionally - * restricted to the given column family or column qualifier, over the - * given table. If the family property is null, it implies - * full table access. + * Represents an authorization for access for the given actions, optionally restricted to the given + * column family or column qualifier, over the given table. If the family property is + * null, it implies full table access. */ @InterfaceAudience.Public public class TablePermission extends Permission { @@ -131,13 +128,13 @@ private boolean failCheckQualifier(byte[] qual) { } /** - * Checks if this permission grants access to perform the given action on - * the given table and key value. + * Checks if this permission grants access to perform the given action on the given table and key + * value. 
* @param table the table on which the operation is being performed * @param kv the KeyValue on which the operation is being requested * @param action the action requested - * @return true if the action is allowed over the given scope - * by this permission, otherwise false + * @return true if the action is allowed over the given scope by this permission, + * otherwise false */ public boolean implies(TableName table, KeyValue kv, Action action) { if (failCheckTable(table)) { @@ -168,8 +165,8 @@ public boolean tableFieldsEqual(TablePermission tp) { boolean tEq = (table == null && tp.table == null) || (table != null && table.equals(tp.table)); boolean fEq = (family == null && tp.family == null) || Bytes.equals(family, tp.family); - boolean qEq = (qualifier == null && tp.qualifier == null) || - Bytes.equals(qualifier, tp.qualifier); + boolean qEq = + (qualifier == null && tp.qualifier == null) || Bytes.equals(qualifier, tp.qualifier); return tEq && fEq && qEq; } @@ -212,10 +209,9 @@ public String toString() { protected String rawExpression() { StringBuilder raw = new StringBuilder(); if (table != null) { - raw.append("table=").append(table) - .append(", family=").append(family == null ? null : Bytes.toString(family)) - .append(", qualifier=").append(qualifier == null ? null : Bytes.toString(qualifier)) - .append(", "); + raw.append("table=").append(table).append(", family=") + .append(family == null ? null : Bytes.toString(family)).append(", qualifier=") + .append(qualifier == null ? null : Bytes.toString(qualifier)).append(", "); } return raw.toString() + super.rawExpression(); } @@ -224,7 +220,7 @@ protected String rawExpression() { public void readFields(DataInput in) throws IOException { super.readFields(in); byte[] tableBytes = Bytes.readByteArray(in); - if(tableBytes.length > 0) { + if (tableBytes.length > 0) { table = TableName.valueOf(tableBytes); } if (in.readBoolean()) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java index 896ba5251a3c..874495126de1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/UserPermission.java @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** - * UserPermission consists of a user name and a permission. - * Permission can be one of [Global, Namespace, Table] permission. + * UserPermission consists of a user name and a permission. Permission can be one of [Global, + * Namespace, Table] permission. 
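The tableFieldsEqual() hunk above reduces to a null-safe, field-by-field comparison; a small standalone sketch of the same pattern, with hypothetical inputs:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

public class TableFieldsEqualSketch {
  // Two fields match when both are null or when their contents are equal;
  // Bytes.equals(null, null) returns true, so the family/qualifier checks pair the
  // byte comparison with the explicit double-null guard kept in the original code.
  static boolean fieldsEqual(TableName ta, byte[] fa, byte[] qa,
      TableName tb, byte[] fb, byte[] qb) {
    boolean tEq = (ta == null && tb == null) || (ta != null && ta.equals(tb));
    boolean fEq = (fa == null && fb == null) || Bytes.equals(fa, fb);
    boolean qEq = (qa == null && qb == null) || Bytes.equals(qa, qb);
    return tEq && fEq && qEq;
  }

  public static void main(String[] args) {
    TableName t1 = TableName.valueOf("t1");
    System.out.println(fieldsEqual(t1, Bytes.toBytes("cf"), null, t1, Bytes.toBytes("cf"), null));
    System.out.println(fieldsEqual(t1, Bytes.toBytes("cf"), null, t1, Bytes.toBytes("cf2"), null));
  }
}
```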
*/ @InterfaceAudience.Public public class UserPermission { @@ -87,8 +85,7 @@ public int hashCode() { @Override public String toString() { - StringBuilder str = new StringBuilder("UserPermission: ") - .append("user=").append(user) + StringBuilder str = new StringBuilder("UserPermission: ").append("user=").append(user) .append(", ").append(permission.toString()); return str.toString(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java index d018ce19921b..375ad68deab0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AbstractSaslClientAuthenticationProvider.java @@ -27,11 +27,10 @@ */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving -public abstract class AbstractSaslClientAuthenticationProvider implements - SaslClientAuthenticationProvider { +public abstract class AbstractSaslClientAuthenticationProvider + implements SaslClientAuthenticationProvider { public static final String AUTH_TOKEN_TYPE = "HBASE_AUTH_TOKEN"; - @Override public final String getTokenKind() { // All HBase authentication tokens are "HBASE_AUTH_TOKEN"'s. We differentiate between them diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java index a681d53719d0..e5fe8f9b11b4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/AuthenticationProviderSelector.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Collection; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -41,9 +40,9 @@ void configure(Configuration conf, Collection availableProviders); /** - * Chooses the authentication provider which should be used given the provided client context - * from the authentication providers passed in via {@link #configure(Configuration, Collection)}. + * Chooses the authentication provider which should be used given the provided client context from + * the authentication providers passed in via {@link #configure(Configuration, Collection)}. 
*/ - Pair> selectProvider( - String clusterId, User user); + Pair> + selectProvider(String clusterId, User user); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java index 752003dad8c6..cc957805f48a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInProviderSelector.java @@ -21,9 +21,7 @@ import java.util.Collection; import java.util.Objects; - import net.jcip.annotations.NotThreadSafe; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -39,14 +37,12 @@ /** * Default implementation of {@link AuthenticationProviderSelector} which can choose from the * authentication implementations which HBase provides out of the box: Simple, Kerberos, and - * Delegation Token authentication. - * - * This implementation will ignore any {@link SaslAuthenticationProvider}'s which are available - * on the classpath or specified in the configuration because HBase cannot correctly choose which - * token should be returned to a client when multiple are present. It is expected that users - * implement their own {@link AuthenticationProviderSelector} when writing a custom provider. - * - * This implementation is not thread-safe. {@link #configure(Configuration, Collection)} and + * Delegation Token authentication. This implementation will ignore any + * {@link SaslAuthenticationProvider}'s which are available on the classpath or specified in the + * configuration because HBase cannot correctly choose which token should be returned to a client + * when multiple are present. It is expected that users implement their own + * {@link AuthenticationProviderSelector} when writing a custom provider. This implementation is not + * thread-safe. {@link #configure(Configuration, Collection)} and * {@link #selectProvider(String, User)} is not safe if they are called concurrently. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @@ -61,8 +57,8 @@ public class BuiltInProviderSelector implements AuthenticationProviderSelector { Text digestAuthTokenKind = null; @Override - public void configure( - Configuration conf, Collection providers) { + public void configure(Configuration conf, + Collection providers) { if (this.conf != null) { throw new IllegalStateException("configure() should only be called once"); } @@ -100,8 +96,8 @@ public void configure( } @Override - public Pair> selectProvider( - String clusterId, User user) { + public Pair> + selectProvider(String clusterId, User user) { requireNonNull(clusterId, "Null clusterId was given"); requireNonNull(user, "Null user was given"); @@ -117,10 +113,10 @@ public Pair> // (for whatever that's worth). for (Token token : user.getTokens()) { // We need to check for two things: - // 1. This token is for the HBase cluster we want to talk to - // 2. We have suppporting client implementation to handle the token (the "kind" of token) - if (clusterIdAsText.equals(token.getService()) && - digestAuthTokenKind.equals(token.getKind())) { + // 1. This token is for the HBase cluster we want to talk to + // 2. 
We have suppporting client implementation to handle the token (the "kind" of token) + if (clusterIdAsText.equals(token.getService()) + && digestAuthTokenKind.equals(token.getKind())) { return new Pair<>(digestAuth, token); } } @@ -128,15 +124,16 @@ public Pair> final UserGroupInformation currentUser = user.getUGI(); // May be null if Hadoop AuthenticationMethod is PROXY final UserGroupInformation realUser = currentUser.getRealUser(); - if (currentUser.hasKerberosCredentials() || - (realUser != null && realUser.hasKerberosCredentials())) { + if (currentUser.hasKerberosCredentials() + || (realUser != null && realUser.hasKerberosCredentials())) { return new Pair<>(krbAuth, null); } // This indicates that a client is requesting some authentication mechanism which the servers // don't know how to process (e.g. there is no provider which can support it). This may be // a bug or simply a misconfiguration of client *or* server. LOG.warn("No matching SASL authentication provider and supporting token found from providers" - + " for user: {}", user); + + " for user: {}", + user); return null; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java index c1b7ddb7c554..712d4035448b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/BuiltInSaslAuthenticationProvider.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. - * - * HBase users should take care to note that this class (and its sub-classes) are marked with the + * Base class for all Apache HBase, built-in {@link SaslAuthenticationProvider}'s to extend. HBase + * users should take care to note that this class (and its sub-classes) are marked with the * {@code InterfaceAudience.Private} annotation. These implementations are available for users to * read, copy, and modify, but should not be extended or re-used in binary form. There are no * compatibility guarantees provided for implementations of this class. 
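The BuiltInProviderSelector hunks above keep the selection order intact: a delegation token scoped to the target cluster wins, then Kerberos credentials, then nothing. A minimal sketch of that order, using only calls that appear in the hunk plus User.getCurrent(); the cluster id and token kind strings are placeholders:

```java
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class ProviderSelectionSketch {
  // Mirrors the decision order in BuiltInProviderSelector.selectProvider().
  static String choose(User user, Text clusterIdAsText, Text digestAuthTokenKind) {
    for (Token<? extends TokenIdentifier> token : user.getTokens()) {
      // 1. token issued for the cluster we want to talk to
      // 2. token of a kind the client has a supporting implementation for
      if (clusterIdAsText.equals(token.getService())
          && digestAuthTokenKind.equals(token.getKind())) {
        return "DIGEST (delegation token)";
      }
    }
    UserGroupInformation current = user.getUGI();
    UserGroupInformation real = current.getRealUser(); // null unless proxying
    if (current.hasKerberosCredentials()
        || (real != null && real.hasKerberosCredentials())) {
      return "KERBEROS";
    }
    return "none (the real selector logs a warning and returns null)";
  }

  public static void main(String[] args) throws Exception {
    System.out.println(choose(User.getCurrent(),
      new Text("placeholder-cluster-id"), new Text("HBASE_AUTH_TOKEN")));
  }
}
```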
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java index 7cbdecd642be..98e6605413c5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ @InterfaceAudience.Private public class DigestSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "DIGEST", (byte)82, "DIGEST-MD5", AuthenticationMethod.TOKEN); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("DIGEST", (byte) 82, "DIGEST-MD5", AuthenticationMethod.TOKEN); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java index a84f24b9080e..735d2ece1965 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/DigestSaslClientAuthenticationProvider.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; import javax.security.auth.callback.NameCallback; @@ -30,7 +29,6 @@ import javax.security.sasl.RealmChoiceCallback; import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -52,7 +50,7 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); + null, SaslUtil.SASL_DEFAULT_REALM, saslProps, new DigestSaslClientCallbackHandler(token)); } public static class DigestSaslClientCallbackHandler implements CallbackHandler { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java index 07101848e507..5ddb54fdad34 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslAuthenticationProvider.java @@ -26,8 +26,8 @@ @InterfaceAudience.Private public class GssSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "KERBEROS", (byte)81, "GSSAPI", AuthenticationMethod.KERBEROS); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("KERBEROS", (byte) 81, "GSSAPI", AuthenticationMethod.KERBEROS); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java index 21a4828b49e9..1ebd62a02ca5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/GssSaslClientAuthenticationProvider.java @@ -20,10 +20,8 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.Sasl; import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SaslUtil; import org.apache.hadoop.hbase.security.SecurityConstants; @@ -42,8 +40,8 @@ @InterfaceAudience.Private public class GssSaslClientAuthenticationProvider extends GssSaslAuthenticationProvider implements SaslClientAuthenticationProvider { - private static final Logger LOG = LoggerFactory.getLogger( - GssSaslClientAuthenticationProvider.class); + private static final Logger LOG = + LoggerFactory.getLogger(GssSaslClientAuthenticationProvider.class); private static boolean useCanonicalHostname(Configuration conf) { return !conf.getBoolean( @@ -57,10 +55,9 @@ public static String getHostnameForServerPrincipal(Configuration conf, InetAddre if (useCanonicalHostname(conf)) { hostname = addr.getCanonicalHostName(); if (hostname.equals(addr.getHostAddress())) { - LOG.warn("Canonical hostname for SASL principal is the same with IP address: " - + hostname + ", " + addr.getHostName() + ". Check DNS configuration or consider " - + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS - + "=true"); + LOG.warn("Canonical hostname for SASL principal is the same with IP address: " + hostname + + ", " + addr.getHostName() + ". Check DNS configuration or consider " + + SecurityConstants.UNSAFE_HBASE_CLIENT_KERBEROS_HOSTNAME_DISABLE_REVERSEDNS + "=true"); } } else { hostname = addr.getHostName(); @@ -89,11 +86,11 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddr, LOG.debug("Setting up Kerberos RPC to server={}", serverPrincipal); String[] names = SaslUtil.splitKerberosName(serverPrincipal); if (names.length != 3) { - throw new IOException("Kerberos principal '" + serverPrincipal - + "' does not have the expected format"); + throw new IOException( + "Kerberos principal '" + serverPrincipal + "' does not have the expected format"); } return Sasl.createSaslClient(new String[] { getSaslAuthMethod().getSaslMechanism() }, null, - names[0], names[1], saslProps, null); + names[0], names[1], saslProps, null); } @Override diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java index 7930564cb9f6..0303e8c48de8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthMethod.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
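The GssSaslClientAuthenticationProvider hunk above rejects principals that do not split into exactly three parts. A small sketch of that check, with a hypothetical principal; SaslUtil.splitKerberosName is the same helper the provider calls:

```java
import org.apache.hadoop.hbase.security.SaslUtil;

public class KerberosPrincipalSketch {
  public static void main(String[] args) {
    String serverPrincipal = "hbase/rs1.example.com@EXAMPLE.COM"; // hypothetical principal
    String[] names = SaslUtil.splitKerberosName(serverPrincipal);
    if (names.length != 3) {
      // This is the condition that makes the provider throw
      // "Kerberos principal '...' does not have the expected format".
      System.out.println("unexpected format");
    } else {
      System.out.println("service=" + names[0] + ", host=" + names[1] + ", realm=" + names[2]);
    }
  }
}
```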
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.provider; import java.util.Objects; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -78,19 +77,13 @@ public boolean equals(Object o) { return false; } SaslAuthMethod other = (SaslAuthMethod) o; - return Objects.equals(name, other.name) && - code == other.code && - Objects.equals(saslMech, other.saslMech) && - Objects.equals(method, other.method); + return Objects.equals(name, other.name) && code == other.code + && Objects.equals(saslMech, other.saslMech) && Objects.equals(method, other.method); } @Override public int hashCode() { - return new HashCodeBuilder() - .append(name) - .append(code) - .append(saslMech) - .append(method) + return new HashCodeBuilder().append(name).append(code).append(saslMech).append(method) .toHashCode(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java index 1f6d821ce953..99e2916fa513 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslAuthenticationProvider.java @@ -22,13 +22,11 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. - * It is suggested that custom implementations extend the abstract class in the type hierarchy - * instead of directly implementing this interface (clients have a base class available, but - * servers presently do not). - * - * Implementations of this interface must be unique among each other via the {@code byte} - * returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. + * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. It is + * suggested that custom implementations extend the abstract class in the type hierarchy instead of + * directly implementing this interface (clients have a base class available, but servers presently + * do not). Implementations of this interface must be unique among each other via the + * {@code byte} returned by {@link SaslAuthMethod#getCode()} on {@link #getSaslAuthMethod()}. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java index 4b1cabcfc494..52f873f71aba 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.SecurityInfo; @@ -38,10 +36,9 @@ /** * Encapsulation of client-side logic to authenticate to HBase via some means over SASL. 
* Implementations should not directly implement this interface, but instead extend - * {@link AbstractSaslClientAuthenticationProvider}. - * - * Implementations of this interface must make an implementation of {@code hashCode()} - * which returns the same value across multiple instances of the provider implementation. + * {@link AbstractSaslClientAuthenticationProvider}. Implementations of this interface must make an + * implementation of {@code hashCode()} which returns the same value across multiple instances of + * the provider implementation. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving @@ -60,18 +57,15 @@ SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo UserInformation getUserInfo(User user); /** - * Returns the "real" user, the user who has the credentials being authenticated by the - * remote service, in the form of an {@link UserGroupInformation} object. - * - * It is common in the Hadoop "world" to have distinct notions of a "real" user and a "proxy" - * user. A "real" user is the user which actually has the credentials (often, a Kerberos ticket), - * but some code may be running as some other user who has no credentials. This method gives - * the authentication provider a chance to acknowledge this is happening and ensure that any - * RPCs are executed with the real user's credentials, because executing them as the proxy user - * would result in failure because no credentials exist to authenticate the RPC. - * - * Not all implementations will need to implement this method. By default, the provided User's - * UGI is returned directly. + * Returns the "real" user, the user who has the credentials being authenticated by the remote + * service, in the form of an {@link UserGroupInformation} object. It is common in the Hadoop + * "world" to have distinct notions of a "real" user and a "proxy" user. A "real" user is the user + * which actually has the credentials (often, a Kerberos ticket), but some code may be running as + * some other user who has no credentials. This method gives the authentication provider a chance + * to acknowledge this is happening and ensure that any RPCs are executed with the real user's + * credentials, because executing them as the proxy user would result in failure because no + * credentials exist to authenticate the RPC. Not all implementations will need to implement this + * method. By default, the provided User's UGI is returned directly. */ default UserGroupInformation getRealUser(User ugi) { return ugi.getUGI(); @@ -86,8 +80,9 @@ default boolean canRetry() { } /** - * Executes any necessary logic to re-login the client. Not all implementations will have - * any logic that needs to be executed. + * Executes any necessary logic to re-login the client. Not all implementations will have any + * logic that needs to be executed. 
*/ - default void relogin() throws IOException {} + default void relogin() throws IOException { + } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java index aaaee003c595..9ab989be940c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SaslClientAuthenticationProviders.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.ServiceLoader; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.security.User; @@ -43,8 +42,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.AUTHENTICATION) @InterfaceStability.Evolving public final class SaslClientAuthenticationProviders { - private static final Logger LOG = LoggerFactory.getLogger( - SaslClientAuthenticationProviders.class); + private static final Logger LOG = + LoggerFactory.getLogger(SaslClientAuthenticationProviders.class); public static final String SELECTOR_KEY = "hbase.client.sasl.provider.class"; public static final String EXTRA_PROVIDERS_KEY = "hbase.client.sasl.provider.extras"; @@ -55,8 +54,7 @@ public final class SaslClientAuthenticationProviders { private final Collection providers; private final AuthenticationProviderSelector selector; - private SaslClientAuthenticationProviders( - Collection providers, + private SaslClientAuthenticationProviders(Collection providers, AuthenticationProviderSelector selector) { this.providers = providers; this.selector = selector; @@ -90,11 +88,11 @@ public static synchronized void reset() { } /** - * Adds the given {@code provider} to the set, only if an equivalent provider does not - * already exist in the set. + * Adds the given {@code provider} to the set, only if an equivalent provider does not already + * exist in the set. 
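On the real-versus-proxy user distinction described in the SaslClientAuthenticationProvider javadoc above, a minimal Hadoop UGI sketch; the proxy user name is a hypothetical placeholder:

```java
import org.apache.hadoop.security.UserGroupInformation;

public class ProxyUserSketch {
  public static void main(String[] args) throws Exception {
    // The "real" user is the one holding credentials (e.g. a Kerberos ticket).
    UserGroupInformation realUser = UserGroupInformation.getLoginUser();
    // A proxy user runs on its behalf but has no credentials of its own.
    UserGroupInformation proxyUser =
      UserGroupInformation.createProxyUser("app-user", realUser);
    System.out.println("real=" + realUser.getShortUserName()
      + ", proxy=" + proxyUser.getShortUserName()
      + ", proxy's real user=" + proxyUser.getRealUser().getShortUserName());
    // getRealUser(User) exists on the provider so RPCs can authenticate with the
    // real user's credentials; the default implementation returns the given UGI as-is.
  }
}
```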
*/ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, - HashMap providers) { + HashMap providers) { Byte code = provider.getSaslAuthMethod().getCode(); SaslClientAuthenticationProvider existingProvider = providers.get(code); if (existingProvider != null) { @@ -109,8 +107,8 @@ static void addProviderIfNotExists(SaslClientAuthenticationProvider provider, */ static AuthenticationProviderSelector instantiateSelector(Configuration conf, Collection providers) { - Class clz = conf.getClass( - SELECTOR_KEY, BuiltInProviderSelector.class, AuthenticationProviderSelector.class); + Class clz = conf.getClass(SELECTOR_KEY, + BuiltInProviderSelector.class, AuthenticationProviderSelector.class); try { AuthenticationProviderSelector selector = clz.getConstructor().newInstance(); selector.configure(conf, providers); @@ -118,10 +116,11 @@ static AuthenticationProviderSelector instantiateSelector(Configuration conf, LOG.trace("Loaded ProviderSelector {}", selector.getClass()); } return selector; - } catch (InstantiationException | IllegalAccessException | NoSuchMethodException | - InvocationTargetException e) { - throw new RuntimeException("Failed to instantiate " + clz + - " as the ProviderSelector defined by " + SELECTOR_KEY, e); + } catch (InstantiationException | IllegalAccessException | NoSuchMethodException + | InvocationTargetException e) { + throw new RuntimeException( + "Failed to instantiate " + clz + " as the ProviderSelector defined by " + SELECTOR_KEY, + e); } } @@ -129,8 +128,8 @@ static AuthenticationProviderSelector instantiateSelector(Configuration conf, * Extracts and instantiates authentication providers from the configuration. */ static void addExplicitProviders(Configuration conf, - HashMap providers) { - for(String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { + HashMap providers) { + for (String implName : conf.getStringCollection(EXTRA_PROVIDERS_KEY)) { Class clz; // Load the class from the config try { @@ -143,7 +142,8 @@ static void addExplicitProviders(Configuration conf, // Make sure it's the right type if (!SaslClientAuthenticationProvider.class.isAssignableFrom(clz)) { LOG.warn("Ignoring SaslClientAuthenticationProvider {} because it is not an instance of" - + " SaslClientAuthenticationProvider", clz); + + " SaslClientAuthenticationProvider", + clz); continue; } @@ -170,19 +170,18 @@ static void addExplicitProviders(Configuration conf, static SaslClientAuthenticationProviders instantiate(Configuration conf) { ServiceLoader loader = ServiceLoader.load(SaslClientAuthenticationProvider.class); - HashMap providerMap = new HashMap<>(); + HashMap providerMap = new HashMap<>(); for (SaslClientAuthenticationProvider provider : loader) { addProviderIfNotExists(provider, providerMap); } addExplicitProviders(conf, providerMap); - Collection providers = Collections.unmodifiableCollection( - providerMap.values()); + Collection providers = + Collections.unmodifiableCollection(providerMap.values()); if (LOG.isTraceEnabled()) { - String loadedProviders = providers.stream() - .map((provider) -> provider.getClass().getName()) + String loadedProviders = providers.stream().map((provider) -> provider.getClass().getName()) .collect(Collectors.joining(", ")); LOG.trace("Found SaslClientAuthenticationProviders {}", loadedProviders); } @@ -192,16 +191,13 @@ static SaslClientAuthenticationProviders instantiate(Configuration conf) { } /** - * Returns the provider and token pair for SIMPLE authentication. 
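The SaslClientAuthenticationProviders hunks above leave the two configuration keys unchanged: extra providers come from hbase.client.sasl.provider.extras (comma-separated class names merged with the ServiceLoader discoveries) and the selector from hbase.client.sasl.provider.class. A minimal sketch; both class names are hypothetical placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Additional providers, loaded by class name and merged with the ServiceLoader set.
    conf.set("hbase.client.sasl.provider.extras",
      "com.example.MySaslClientAuthenticationProvider");
    // Replacement for BuiltInProviderSelector.
    conf.set("hbase.client.sasl.provider.class", "com.example.MyProviderSelector");
    System.out.println(conf.get("hbase.client.sasl.provider.extras"));
  }
}
```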
- * - * This method is a "hack" while SIMPLE authentication for HBase does not flow through - * the SASL codepath. + * Returns the provider and token pair for SIMPLE authentication. This method is a "hack" while + * SIMPLE authentication for HBase does not flow through the SASL codepath. */ public Pair> getSimpleProvider() { Optional optional = providers.stream() - .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider) - .findFirst(); + .filter((p) -> p instanceof SimpleSaslClientAuthenticationProvider).findFirst(); return new Pair<>(optional.get(), null); } @@ -209,15 +205,14 @@ static SaslClientAuthenticationProviders instantiate(Configuration conf) { * Chooses the best authentication provider and corresponding token given the HBase cluster * identifier and the user. */ - public Pair> selectProvider( - String clusterId, User clientUser) { + public Pair> + selectProvider(String clusterId, User clientUser) { return selector.selectProvider(clusterId, clientUser); } @Override public String toString() { - return providers.stream() - .map((p) -> p.getClass().getName()) + return providers.stream().map((p) -> p.getClass().getName()) .collect(Collectors.joining(", ", "providers=[", "], selector=")) + selector.getClass(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java index 3f1122c75413..4b2ecb2cf476 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslAuthenticationProvider.java @@ -25,8 +25,8 @@ */ @InterfaceAudience.Private public class SimpleSaslAuthenticationProvider extends BuiltInSaslAuthenticationProvider { - public static final SaslAuthMethod SASL_AUTH_METHOD = new SaslAuthMethod( - "SIMPLE", (byte)80, "", AuthenticationMethod.SIMPLE); + public static final SaslAuthMethod SASL_AUTH_METHOD = + new SaslAuthMethod("SIMPLE", (byte) 80, "", AuthenticationMethod.SIMPLE); @Override public SaslAuthMethod getSaslAuthMethod() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java index 3a9142f34c44..d1098c81f61f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/provider/SimpleSaslClientAuthenticationProvider.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.net.InetAddress; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.security.SecurityInfo; import org.apache.hadoop.hbase.security.User; @@ -34,8 +32,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; @InterfaceAudience.Private -public class SimpleSaslClientAuthenticationProvider extends - SimpleSaslAuthenticationProvider implements SaslClientAuthenticationProvider { +public class SimpleSaslClientAuthenticationProvider extends SimpleSaslAuthenticationProvider + implements SaslClientAuthenticationProvider { @Override public SaslClient createClient(Configuration conf, InetAddress serverAddress, diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java index 1e4a529f1eac..c247852be52c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenIdentifier.java @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; +import com.google.protobuf.ByteString; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.AuthenticationProtos; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.TokenIdentifier; - -import com.google.protobuf.ByteString; +import org.apache.yetus.audience.InterfaceAudience; /** * Represents the identity information stored in an HBase authentication token. @@ -51,8 +48,8 @@ public AuthenticationTokenIdentifier(String username) { this.username = username; } - public AuthenticationTokenIdentifier(String username, int keyId, - long issueDate, long expirationDate) { + public AuthenticationTokenIdentifier(String username, int keyId, long issueDate, + long expirationDate) { this.username = username; this.keyId = keyId; this.issueDate = issueDate; @@ -119,9 +116,7 @@ public byte[] toBytes() { if (username != null) { builder.setUsername(ByteString.copyFromUtf8(username)); } - builder.setIssueDate(issueDate) - .setExpirationDate(expirationDate) - .setKeyId(keyId) + builder.setIssueDate(issueDate).setExpirationDate(expirationDate).setKeyId(keyId) .setSequenceNumber(sequenceNumber); return builder.build().toByteArray(); } @@ -139,13 +134,13 @@ public void readFields(DataInput in) throws IOException { byte[] inBytes = new byte[len]; in.readFully(inBytes); AuthenticationProtos.TokenIdentifier.Builder builder = - AuthenticationProtos.TokenIdentifier.newBuilder(); + AuthenticationProtos.TokenIdentifier.newBuilder(); ProtobufUtil.mergeFrom(builder, inBytes); AuthenticationProtos.TokenIdentifier identifier = builder.build(); // sanity check on type - if (!identifier.hasKind() || - identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { - throw new IOException("Invalid TokenIdentifier kind from input "+identifier.getKind()); + if (!identifier.hasKind() + || identifier.getKind() != AuthenticationProtos.TokenIdentifier.Kind.HBASE_AUTH_TOKEN) { + throw new IOException("Invalid TokenIdentifier kind from input " + identifier.getKind()); } // copy the field values @@ -172,26 +167,23 @@ public boolean equals(Object other) { return false; } if (other instanceof AuthenticationTokenIdentifier) { - AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier)other; - return sequenceNumber == ident.getSequenceNumber() - && keyId == ident.getKeyId() - && issueDate == ident.getIssueDate() - && expirationDate == ident.getExpirationDate() - && (username == null ? 
ident.getUsername() == null : - username.equals(ident.getUsername())); + AuthenticationTokenIdentifier ident = (AuthenticationTokenIdentifier) other; + return sequenceNumber == ident.getSequenceNumber() && keyId == ident.getKeyId() + && issueDate == ident.getIssueDate() && expirationDate == ident.getExpirationDate() + && (username == null ? ident.getUsername() == null + : username.equals(ident.getUsername())); } return false; } @Override public int hashCode() { - return (int)sequenceNumber; + return (int) sequenceNumber; } @Override public String toString() { - return "(username=" + username + ", keyId=" - + keyId + ", issueDate=" + issueDate - + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; + return "(username=" + username + ", keyId=" + keyId + ", issueDate=" + issueDate + + ", expirationDate=" + expirationDate + ", sequenceNumber=" + sequenceNumber + ")"; } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java index 39959ef61db4..709279b73604 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/AuthenticationTokenSelector.java @@ -15,22 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.token; import java.util.Collection; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.security.token.TokenSelector; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; @InterfaceAudience.Private -public class AuthenticationTokenSelector - implements TokenSelector { +public class AuthenticationTokenSelector implements TokenSelector { private static final Logger LOG = LoggerFactory.getLogger(AuthenticationTokenSelector.class); public AuthenticationTokenSelector() { @@ -41,12 +38,12 @@ public Token selectToken(Text serviceName, Collection> tokens) { if (serviceName != null) { for (Token ident : tokens) { - if (serviceName.equals(ident.getService()) && - AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { + if (serviceName.equals(ident.getService()) + && AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE.equals(ident.getKind())) { if (LOG.isDebugEnabled()) { - LOG.debug("Returning token "+ident); + LOG.debug("Returning token " + ident); } - return (Token)ident; + return (Token) ident; } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java index 09398edc589a..be2702777153 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/token/ClientTokenUtil.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
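For the AuthenticationTokenIdentifier hunks above, a minimal construction sketch; the user name, key id and lifetime values are placeholders:

```java
import org.apache.hadoop.hbase.security.token.AuthenticationTokenIdentifier;

public class TokenIdentifierSketch {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    // (username, keyId, issueDate, expirationDate), as in the constructor above.
    AuthenticationTokenIdentifier id =
      new AuthenticationTokenIdentifier("alice", 7, now, now + 24L * 60 * 60 * 1000);
    // toBytes() serializes to the AuthenticationProtos.TokenIdentifier message of kind
    // HBASE_AUTH_TOKEN; readFields() rejects any other kind, per the sanity check above.
    byte[] serialized = id.toBytes();
    System.out.println(id + " -> " + serialized.length + " bytes");
  }
}
```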
*/ - package org.apache.hadoop.hbase.security.token; import com.google.protobuf.ByteString; @@ -35,6 +34,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; /** @@ -47,7 +47,8 @@ public final class ClientTokenUtil { // Set in TestClientTokenUtil via reflection private static ServiceException injectedException; - private ClientTokenUtil() {} + private ClientTokenUtil() { + } private static void injectFault() throws ServiceException { if (injectedException != null) { @@ -62,20 +63,18 @@ private static void injectFault() throws ServiceException { * @return the authentication token instance */ @InterfaceAudience.Private - public static Token obtainToken( - Connection conn) throws IOException { + public static Token obtainToken(Connection conn) + throws IOException { Table meta = null; try { injectFault(); meta = conn.getTable(TableName.META_TABLE_NAME); - CoprocessorRpcChannel rpcChannel = meta.coprocessorService( - HConstants.EMPTY_START_ROW); + CoprocessorRpcChannel rpcChannel = meta.coprocessorService(HConstants.EMPTY_START_ROW); AuthenticationProtos.AuthenticationService.BlockingInterface service = AuthenticationProtos.AuthenticationService.newBlockingStub(rpcChannel); - AuthenticationProtos.GetAuthenticationTokenResponse response = - service.getAuthenticationToken(null, - AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); + AuthenticationProtos.GetAuthenticationTokenResponse response = service.getAuthenticationToken( + null, AuthenticationProtos.GetAuthenticationTokenRequest.getDefaultInstance()); return toToken(response.getToken()); } catch (ServiceException se) { @@ -89,7 +88,6 @@ public static Token obtainToken( /** * Converts a Token instance (with embedded identifier) to the protobuf representation. - * * @param token the Token instance to copy * @return the protobuf Token message */ @@ -106,14 +104,12 @@ static AuthenticationProtos.Token toToken(Token t /** * Converts a protobuf Token message back into a Token instance. - * * @param proto the protobuf Token message * @return the Token instance */ @InterfaceAudience.Private static Token toToken(AuthenticationProtos.Token proto) { - return new Token<>( - proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, + return new Token<>(proto.hasIdentifier() ? proto.getIdentifier().toByteArray() : null, proto.hasPassword() ? proto.getPassword().toByteArray() : null, AuthenticationTokenIdentifier.AUTH_TOKEN_TYPE, proto.hasService() ? new Text(proto.getService().toStringUtf8()) : null); @@ -126,8 +122,8 @@ static Token toToken(AuthenticationProtos.Token p * @return the authentication token instance */ @InterfaceAudience.Private - static Token obtainToken( - final Connection conn, User user) throws IOException, InterruptedException { + static Token obtainToken(final Connection conn, User user) + throws IOException, InterruptedException { return user.runAs(new PrivilegedExceptionAction>() { @Override public Token run() throws Exception { @@ -137,15 +133,13 @@ public Token run() throws Exception { } /** - * Obtain an authentication token for the given user and add it to the - * user's credentials. + * Obtain an authentication token for the given user and add it to the user's credentials. 
* @param conn The HBase cluster connection * @param user The user for whom to obtain the token * @throws IOException If making a remote call to the authentication service fails * @throws InterruptedException If executing as the given user is interrupted */ - public static void obtainAndCacheToken(final Connection conn, - User user) + public static void obtainAndCacheToken(final Connection conn, User user) throws IOException, InterruptedException { try { Token token = obtainToken(conn, user); @@ -154,8 +148,7 @@ public static void obtainAndCacheToken(final Connection conn, throw new IOException("No token returned for user " + user.getName()); } if (LOG.isDebugEnabled()) { - LOG.debug("Obtained token " + token.getKind().toString() + " for user " + - user.getName()); + LOG.debug("Obtained token " + token.getKind().toString() + " for user " + user.getName()); } user.addToken(token); } catch (IOException | InterruptedException | RuntimeException e) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java index f8ac1b966097..f15bab6c0951 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/Authorizations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -31,6 +30,7 @@ public class Authorizations { private List labels; + public Authorizations(String... labels) { this.labels = new ArrayList<>(labels.length); Collections.addAll(this.labels, labels); @@ -43,12 +43,12 @@ public Authorizations(List labels) { public List getLabels() { return Collections.unmodifiableList(this.labels); } - + @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("[ "); - for (String label: labels) { + for (String label : labels) { sb.append(label); sb.append(' '); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java index 6cf8fb748dfd..8abaee005094 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/CellVisibility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hbase.security.visibility; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** * This contains a visibility expression which can be associated with a cell. When it is set with a * Mutation, all the cells in that mutation will get associated with this expression. A visibility - * expression can contain visibility labels combined with logical - * operators AND(&), OR(|) and NOT(!) 
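The ClientTokenUtil hunks above are exercised roughly as follows; a minimal sketch that assumes a running secure cluster reachable from the default HBaseConfiguration:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.token.ClientTokenUtil;

public class ObtainTokenSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      User user = User.getCurrent();
      // Calls the AuthenticationService coprocessor on hbase:meta and adds the
      // resulting HBASE_AUTH_TOKEN delegation token to the user's credentials.
      ClientTokenUtil.obtainAndCacheToken(conn, user);
    }
  }
}
```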
+ * expression can contain visibility labels combined with logical operators AND(&), OR(|) and + * NOT(!) */ @InterfaceAudience.Public public class CellVisibility { @@ -48,25 +48,22 @@ public String toString() { } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' */ public static String quote(String auth) { return quote(Bytes.toBytes(auth)); } /** - * Helps in quoting authentication Strings. Use this if unicode characters to - * be used in expression or special characters like '(', ')', - * '"','\','&','|','!' + * Helps in quoting authentication Strings. Use this if unicode characters to be used in + * expression or special characters like '(', ')', '"','\','&','|','!' */ public static String quote(byte[] auth) { int escapeChars = 0; for (int i = 0; i < auth.length; i++) - if (auth[i] == '"' || auth[i] == '\\') - escapeChars++; + if (auth[i] == '"' || auth[i] == '\\') escapeChars++; byte[] escapedAuth = new byte[auth.length + escapeChars + 2]; int index = 1; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java index 778288d4c03f..e9160ec976c2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/InvalidLabelException.java @@ -28,4 +28,3 @@ public InvalidLabelException(String msg) { super(msg); } } - diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index 35564d626e8c..591f85eec74c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,11 +21,9 @@ import com.google.protobuf.ByteString; import com.google.protobuf.ServiceException; - import java.io.IOException; import java.util.Map; import java.util.regex.Pattern; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Connection; @@ -48,7 +46,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility client for doing visibility labels admin operations. */ @@ -68,7 +65,6 @@ public static boolean isCellVisibilityEnabled(Connection connection) throws IOEx /** * Utility method for adding label to the system. 
- * * @param conf * @param label * @return VisibilityLabelsResponse @@ -85,7 +81,6 @@ public static VisibilityLabelsResponse addLabel(Configuration conf, final String /** * Utility method for adding label to the system. - * * @param connection * @param label * @return VisibilityLabelsResponse @@ -98,7 +93,6 @@ public static VisibilityLabelsResponse addLabel(Connection connection, final Str /** * Utility method for adding labels to the system. - * * @param conf * @param labels * @return VisibilityLabelsResponse @@ -115,7 +109,6 @@ public static VisibilityLabelsResponse addLabels(Configuration conf, final Strin /** * Utility method for adding labels to the system. - * * @param connection * @param labels * @return VisibilityLabelsResponse @@ -126,31 +119,32 @@ public static VisibilityLabelsResponse addLabels(Connection connection, final St try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); - builder.addVisLabel(newBuilder.build()); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); + builder.addVisLabel(newBuilder.build()); + } + } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + }; Map result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. 
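Reviewer note, not part of the patch: the addLabel/addLabels coprocessor calls re-indented above are normally driven by a short admin sequence like the following. This is a hedged sketch; the label and user names are made up, and setAuths is the companion VisibilityClient helper that does not appear in this hunk. Both calls are declared to throw Throwable, which the caller is assumed to handle.

    // Hedged sketch: declare visibility labels, then grant one of them to a user.
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      VisibilityClient.addLabels(connection, new String[] { "secret", "confidential" });
      VisibilityClient.setAuths(connection, new String[] { "secret" }, "alice");
    }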
} @@ -211,25 +205,24 @@ public static GetAuthsResponse getAuths(Connection connection, final String user try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = - table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + @Override + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; + Map result = table.coprocessorService(VisibilityLabelsService.class, + HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } @@ -238,7 +231,7 @@ public GetAuthsResponse call(VisibilityLabelsService service) throws IOException /** * Retrieve the list of visibility labels defined in the system. * @param conf - * @param regex The regular expression to filter which labels are returned. + * @param regex The regular expression to filter which labels are returned. * @return labels The list of visibility labels defined in the system. * @throws Throwable * @deprecated Use {@link #listLabels(Connection,String)} instead. @@ -246,7 +239,7 @@ public GetAuthsResponse call(VisibilityLabelsService service) throws IOException @Deprecated public static ListLabelsResponse listLabels(Configuration conf, final String regex) throws Throwable { - try(Connection connection = ConnectionFactory.createConnection(conf)){ + try (Connection connection = ConnectionFactory.createConnection(conf)) { return listLabels(connection, regex); } } @@ -254,7 +247,7 @@ public static ListLabelsResponse listLabels(Configuration conf, final String reg /** * Retrieve the list of visibility labels defined in the system. * @param connection The Connection instance to use. - * @param regex The regular expression to filter which labels are returned. + * @param regex The regular expression to filter which labels are returned. * @return labels The list of visibility labels defined in the system. 
* @throws Throwable */ @@ -263,29 +256,29 @@ public static ListLabelsResponse listLabels(Connection connection, final String try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { - ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); - if (regex != null) { - // Compile the regex here to catch any regex exception earlier. - Pattern pattern = Pattern.compile(regex); - listAuthLabelsReqBuilder.setRegex(pattern.toString()); - } - service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); - ListLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + @Override + public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { + ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); + if (regex != null) { + // Compile the regex here to catch any regex exception earlier. + Pattern pattern = Pattern.compile(regex); + listAuthLabelsReqBuilder.setRegex(pattern.toString()); + } + service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); + ListLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; Map result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. 
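Reviewer note, not part of the patch: once labels exist, the CellVisibility and Authorizations classes reformatted earlier in this patch tie them to individual cells, and getAuths/listLabels above read the server-side state back. A hedged read/write sketch, assuming an open Table handle named table and the labels from the previous note:

    // Attach a visibility expression to a Put; CellVisibility.quote() is only
    // needed when a label contains special characters such as '|', '&', '!' or quotes.
    Put p = new Put(Bytes.toBytes("row1"));
    p.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("value"));
    p.setCellVisibility(new CellVisibility("secret|confidential"));
    table.put(p);

    // Only scans carrying a matching authorization will see the cell.
    Scan scan = new Scan();
    scan.setAuthorizations(new Authorizations("secret"));
    try (ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // process r
      }
    }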
} @@ -328,34 +321,35 @@ private static VisibilityLabelsResponse setOrClearAuths(Connection connection, try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - @Override - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) + throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); + } + } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; } - } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; - Map result = table.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java index 0945dd98afc2..18bf96a28d4a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityConstants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -19,8 +19,8 @@ import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public final class VisibilityConstants { @@ -31,8 +31,8 @@ public final class VisibilityConstants { public static final String VISIBILITY_LABELS_ATTR_KEY = "VISIBILITY"; /** Internal storage table for visibility labels */ - public static final TableName LABELS_TABLE_NAME = TableName.valueOf( - NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); + public static final TableName LABELS_TABLE_NAME = + TableName.valueOf(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR, "labels"); /** Family for the internal storage table for visibility labels */ public static final byte[] LABELS_TABLE_FAMILY = Bytes.toBytes("f"); @@ -41,15 +41,15 @@ public final class VisibilityConstants { public static final byte[] LABEL_QUALIFIER = new byte[1]; /** - * Visibility serialization version format. It indicates the visibility labels - * are sorted based on ordinal + * Visibility serialization version format. It indicates the visibility labels are sorted based on + * ordinal **/ public static final byte SORTED_ORDINAL_SERIALIZATION_FORMAT = 1; /** Byte representation of the visibility_serialization_version **/ public static final byte[] SORTED_ORDINAL_SERIALIZATION_FORMAT_TAG_VAL = new byte[] { SORTED_ORDINAL_SERIALIZATION_FORMAT }; - public static final String CHECK_AUTHS_FOR_MUTATION = + public static final String CHECK_AUTHS_FOR_MUTATION = "hbase.security.visibility.mutations.checkauths"; public static final String NOT_OPERATOR = "!"; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java index a73d47501912..7d8d550e82e1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityControllerNotReadyException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /* diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java index 874b2b42cec3..dfbb0b9d02b2 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityLabelsValidator.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.security.visibility; import java.util.regex.Pattern; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -54,7 +53,7 @@ public class VisibilityLabelsValidator { validAuthChars['.'] = true; validAuthChars['/'] = true; } - + static final boolean isValidAuthChar(byte b) { return validAuthChars[0xff & b]; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java index 523b22424146..50e1ea3bb61f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ProtobufUtil.java @@ -43,10 +43,8 @@ import java.util.function.Function; import java.util.regex.Pattern; import java.util.stream.Collectors; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.ByteBufferExtendedCell; import org.apache.hadoop.hbase.CacheEvictionStats; import org.apache.hadoop.hbase.CacheEvictionStatsBuilder; @@ -70,9 +68,10 @@ import org.apache.hadoop.hbase.ServerTaskBuilder; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.BalanceRequest; import org.apache.hadoop.hbase.client.BalanceResponse; -import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.BalancerDecision; +import org.apache.hadoop.hbase.client.BalancerRejection; import org.apache.hadoop.hbase.client.CheckAndMutate; import org.apache.hadoop.hbase.client.ClientUtil; import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; @@ -119,6 +118,30 @@ import org.apache.hadoop.hbase.replication.ReplicationLoadSource; import org.apache.hadoop.hbase.security.visibility.Authorizations; import org.apache.hadoop.hbase.security.visibility.CellVisibility; +import org.apache.hadoop.hbase.util.Addressing; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.DynamicClassLoader; +import org.apache.hadoop.hbase.util.ExceptionUtil; +import org.apache.hadoop.hbase.util.Methods; +import org.apache.hadoop.hbase.util.VersionInfo; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; +import org.apache.hbase.thirdparty.com.google.gson.JsonArray; +import org.apache.hbase.thirdparty.com.google.gson.JsonElement; +import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; +import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; +import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; 
+import org.apache.hbase.thirdparty.com.google.protobuf.Message; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; +import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; +import org.apache.hbase.thirdparty.com.google.protobuf.Service; +import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; +import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; +import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; + import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService; import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.ClearSlowLogResponses; @@ -190,36 +213,13 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType; import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor; import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.util.Addressing; -import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.DynamicClassLoader; -import org.apache.hadoop.hbase.util.ExceptionUtil; -import org.apache.hadoop.hbase.util.Methods; -import org.apache.hadoop.hbase.util.VersionInfo; -import org.apache.hadoop.ipc.RemoteException; -import org.apache.hbase.thirdparty.com.google.common.io.ByteStreams; -import org.apache.hbase.thirdparty.com.google.gson.JsonArray; -import org.apache.hbase.thirdparty.com.google.gson.JsonElement; -import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; -import org.apache.hbase.thirdparty.com.google.protobuf.CodedInputStream; -import org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException; -import org.apache.hbase.thirdparty.com.google.protobuf.Message; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcChannel; -import org.apache.hbase.thirdparty.com.google.protobuf.RpcController; -import org.apache.hbase.thirdparty.com.google.protobuf.Service; -import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -import org.apache.hbase.thirdparty.com.google.protobuf.TextFormat; -import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations; -import org.apache.hbase.thirdparty.org.apache.commons.collections4.CollectionUtils; -import org.apache.yetus.audience.InterfaceAudience; /** - * Protobufs utility. - * Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil (i.e. no 'shaded' in - * the package name) carries a COPY of a subset of this class for non-shaded - * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in - * the companion class too (not the end of the world, especially if you are adding new functionality - * but something to be aware of. + * Protobufs utility. Be aware that a class named org.apache.hadoop.hbase.protobuf.ProtobufUtil + * (i.e. no 'shaded' in the package name) carries a COPY of a subset of this class for non-shaded + * users; e.g. Coprocessor Endpoints. If you make change in here, be sure to make change in the + * companion class too (not the end of the world, especially if you are adding new functionality but + * something to be aware of. 
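Reviewer note, not part of the patch: the class javadoc above describes ProtobufUtil as a pure conversion utility. As a hedged illustration of the PB-magic helpers whose signatures are reformatted a little further below (payload contents are invented for the example):

    // Hedged sketch: content written to znodes is prefixed with PB_MAGIC so
    // readers can distinguish protobuf-encoded payloads from legacy ones.
    byte[] payload = Bytes.toBytes("some-serialized-message");
    byte[] decorated = ProtobufUtil.prependPBMagic(payload);
    if (ProtobufUtil.isPBMagicPrefix(decorated)) {
      int off = ProtobufUtil.lengthOfPBMagic();
      // the actual protobuf bytes start at offset 'off'
    }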
*/ @InterfaceAudience.Private // TODO: some clients (Hive, etc) use this class public final class ProtobufUtil { @@ -228,18 +228,18 @@ private ProtobufUtil() { } /** - * Many results are simple: no cell, exists true or false. To save on object creations, - * we reuse them across calls. + * Many results are simple: no cell, exists true or false. To save on object creations, we reuse + * them across calls. */ - private final static Cell[] EMPTY_CELL_ARRAY = new Cell[]{}; + private final static Cell[] EMPTY_CELL_ARRAY = new Cell[] {}; private final static Result EMPTY_RESULT = Result.create(EMPTY_CELL_ARRAY); final static Result EMPTY_RESULT_EXISTS_TRUE = Result.create(null, true); final static Result EMPTY_RESULT_EXISTS_FALSE = Result.create(null, false); private final static Result EMPTY_RESULT_STALE = Result.create(EMPTY_CELL_ARRAY, null, true); - private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE - = Result.create((Cell[])null, true, true); - private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE - = Result.create((Cell[])null, false, true); + private final static Result EMPTY_RESULT_EXISTS_TRUE_STALE = + Result.create((Cell[]) null, true, true); + private final static Result EMPTY_RESULT_EXISTS_FALSE_STALE = + Result.create((Cell[]) null, false, true); private final static ClientProtos.Result EMPTY_RESULT_PB; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE; @@ -248,13 +248,12 @@ private ProtobufUtil() { private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_TRUE_STALE; private final static ClientProtos.Result EMPTY_RESULT_PB_EXISTS_FALSE_STALE; - static { ClientProtos.Result.Builder builder = ClientProtos.Result.newBuilder(); builder.setExists(true); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); + EMPTY_RESULT_PB_EXISTS_TRUE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_TRUE_STALE = builder.build(); @@ -262,13 +261,13 @@ private ProtobufUtil() { builder.setExists(false); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); + EMPTY_RESULT_PB_EXISTS_FALSE = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_EXISTS_FALSE_STALE = builder.build(); builder.clear(); builder.setAssociatedCellCount(0); - EMPTY_RESULT_PB = builder.build(); + EMPTY_RESULT_PB = builder.build(); builder.setStale(true); EMPTY_RESULT_PB_STALE = builder.build(); } @@ -284,9 +283,8 @@ private final static class ClassLoaderHolder { static { ClassLoader parent = ProtobufUtil.class.getClassLoader(); Configuration conf = HBaseConfiguration.create(); - CLASS_LOADER = AccessController.doPrivileged((PrivilegedAction) - () -> new DynamicClassLoader(conf, parent) - ); + CLASS_LOADER = AccessController + .doPrivileged((PrivilegedAction) () -> new DynamicClassLoader(conf, parent)); classLoaderLoaded = true; } } @@ -296,14 +294,13 @@ public static boolean isClassLoaderLoaded() { } /** - * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, - * to flag what follows as a protobuf in hbase. Prepend these bytes to all content written to - * znodes, etc. + * Prepend the passed bytes with four bytes of magic, {@link ProtobufMagic#PB_MAGIC}, to flag what + * follows as a protobuf in hbase. Prepend these bytes to all content written to znodes, etc. * @param bytes Bytes to decorate - * @return The passed bytes with magic prepended (Creates a new - * byte array that is bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. 
+ * @return The passed bytes with magic prepended (Creates a new byte array that is + * bytes.length plus {@link ProtobufMagic#PB_MAGIC}.length. */ - public static byte [] prependPBMagic(final byte [] bytes) { + public static byte[] prependPBMagic(final byte[] bytes) { return Bytes.add(PB_MAGIC, bytes); } @@ -311,7 +308,7 @@ public static boolean isClassLoaderLoaded() { * @param bytes Bytes to check. * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes) { + public static boolean isPBMagicPrefix(final byte[] bytes) { return ProtobufMagic.isPBMagicPrefix(bytes); } @@ -321,7 +318,7 @@ public static boolean isPBMagicPrefix(final byte [] bytes) { * @param len length to use * @return True if passed bytes has {@link ProtobufMagic#PB_MAGIC} for a prefix. */ - public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) { + public static boolean isPBMagicPrefix(final byte[] bytes, int offset, int len) { return ProtobufMagic.isPBMagicPrefix(bytes, offset, len); } @@ -344,20 +341,18 @@ public static int lengthOfPBMagic() { return ProtobufMagic.lengthOfPBMagic(); } - public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte [] value) { + public static ComparatorProtos.ByteArrayComparable toByteArrayComparable(final byte[] value) { ComparatorProtos.ByteArrayComparable.Builder builder = - ComparatorProtos.ByteArrayComparable.newBuilder(); + ComparatorProtos.ByteArrayComparable.newBuilder(); if (value != null) builder.setValue(UnsafeByteOperations.unsafeWrap(value)); return builder.build(); } /** - * Return the IOException thrown by the remote server wrapped in - * ServiceException as cause. - * + * Return the IOException thrown by the remote server wrapped in ServiceException as cause. * @param se ServiceException that wraps IO exception thrown by the server - * @return Exception wrapped in ServiceException or - * a new IOException that wraps the unexpected ServiceException. + * @return Exception wrapped in ServiceException or a new IOException that wraps the unexpected + * ServiceException. */ public static IOException getRemoteException(ServiceException se) { return makeIOExceptionOfException(se); @@ -366,8 +361,8 @@ public static IOException getRemoteException(ServiceException se) { /** * Like {@link #getRemoteException(ServiceException)} but more generic, able to handle more than * just {@link ServiceException}. Prefer this method to - * {@link #getRemoteException(ServiceException)} because trying to - * contain direct protobuf references. + * {@link #getRemoteException(ServiceException)} because trying to contain direct protobuf + * references. * @param e */ public static IOException handleRemoteException(Exception e) { @@ -383,14 +378,13 @@ private static IOException makeIOExceptionOfException(Exception e) { return ExceptionUtil.asInterrupt(t); } if (t instanceof RemoteException) { - t = ((RemoteException)t).unwrapRemoteException(); + t = ((RemoteException) t).unwrapRemoteException(); } - return t instanceof IOException? (IOException)t: new HBaseIOException(t); + return t instanceof IOException ? 
(IOException) t : new HBaseIOException(t); } /** * Convert a ServerName to a protocol buffer ServerName - * * @param serverName the ServerName to convert * @return the converted protocol buffer ServerName * @see #toServerName(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName) @@ -399,8 +393,7 @@ public static HBaseProtos.ServerName toServerName(final ServerName serverName) { if (serverName == null) { return null; } - HBaseProtos.ServerName.Builder builder = - HBaseProtos.ServerName.newBuilder(); + HBaseProtos.ServerName.Builder builder = HBaseProtos.ServerName.newBuilder(); builder.setHostName(serverName.getHostname()); if (serverName.getPort() >= 0) { builder.setPort(serverName.getPort()); @@ -413,7 +406,6 @@ public static HBaseProtos.ServerName toServerName(final ServerName serverName) { /** * Convert a protocol buffer ServerName to a ServerName - * * @param proto the protocol buffer ServerName to convert * @return the converted ServerName */ @@ -436,10 +428,8 @@ public static ServerName toServerName(final HBaseProtos.ServerName proto) { * @param proto protocol buffer ServerNameList * @return a list of ServerName */ - public static List toServerNameList( - List proto) { - return proto.stream().map(ProtobufUtil::toServerName) - .collect(Collectors.toList()); + public static List toServerNameList(List proto) { + return proto.stream().map(ProtobufUtil::toServerName).collect(Collectors.toList()); } /** @@ -447,8 +437,8 @@ public static List toServerNameList( * @param proto the ListNamespaceDescriptorsResponse * @return a list of NamespaceDescriptor */ - public static List toNamespaceDescriptorList( - ListNamespaceDescriptorsResponse proto) { + public static List + toNamespaceDescriptorList(ListNamespaceDescriptorsResponse proto) { return proto.getNamespaceDescriptorList().stream().map(ProtobufUtil::toNamespaceDescriptor) .collect(Collectors.toList()); } @@ -480,12 +470,11 @@ public static List toTableDescriptorList(GetTableDescriptorsRes /** * get the split keys in form "byte [][]" from a CreateTableRequest proto - * * @param proto the CreateTableRequest * @return the split keys */ - public static byte [][] getSplitKeysArray(final CreateTableRequest proto) { - byte [][] splitKeys = new byte[proto.getSplitKeysCount()][]; + public static byte[][] getSplitKeysArray(final CreateTableRequest proto) { + byte[][] splitKeys = new byte[proto.getSplitKeysCount()][]; for (int i = 0; i < proto.getSplitKeysCount(); ++i) { splitKeys[i] = proto.getSplitKeys(i).toByteArray(); } @@ -495,48 +484,45 @@ public static List toTableDescriptorList(GetTableDescriptorsRes /** * Convert a protobuf Durability into a client Durability */ - public static Durability toDurability( - final ClientProtos.MutationProto.Durability proto) { - switch(proto) { - case USE_DEFAULT: - return Durability.USE_DEFAULT; - case SKIP_WAL: - return Durability.SKIP_WAL; - case ASYNC_WAL: - return Durability.ASYNC_WAL; - case SYNC_WAL: - return Durability.SYNC_WAL; - case FSYNC_WAL: - return Durability.FSYNC_WAL; - default: - return Durability.USE_DEFAULT; + public static Durability toDurability(final ClientProtos.MutationProto.Durability proto) { + switch (proto) { + case USE_DEFAULT: + return Durability.USE_DEFAULT; + case SKIP_WAL: + return Durability.SKIP_WAL; + case ASYNC_WAL: + return Durability.ASYNC_WAL; + case SYNC_WAL: + return Durability.SYNC_WAL; + case FSYNC_WAL: + return Durability.FSYNC_WAL; + default: + return Durability.USE_DEFAULT; } } /** * Convert a client Durability into a protbuf Durability */ - 
public static ClientProtos.MutationProto.Durability toDurability( - final Durability d) { - switch(d) { - case USE_DEFAULT: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; - case SKIP_WAL: - return ClientProtos.MutationProto.Durability.SKIP_WAL; - case ASYNC_WAL: - return ClientProtos.MutationProto.Durability.ASYNC_WAL; - case SYNC_WAL: - return ClientProtos.MutationProto.Durability.SYNC_WAL; - case FSYNC_WAL: - return ClientProtos.MutationProto.Durability.FSYNC_WAL; - default: - return ClientProtos.MutationProto.Durability.USE_DEFAULT; + public static ClientProtos.MutationProto.Durability toDurability(final Durability d) { + switch (d) { + case USE_DEFAULT: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; + case SKIP_WAL: + return ClientProtos.MutationProto.Durability.SKIP_WAL; + case ASYNC_WAL: + return ClientProtos.MutationProto.Durability.ASYNC_WAL; + case SYNC_WAL: + return ClientProtos.MutationProto.Durability.SYNC_WAL; + case FSYNC_WAL: + return ClientProtos.MutationProto.Durability.FSYNC_WAL; + default: + return ClientProtos.MutationProto.Durability.USE_DEFAULT; } } /** * Convert a protocol buffer Get to a client Get - * * @param proto the protocol buffer Get to convert * @return the converted client Get * @throws IOException @@ -560,8 +546,8 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + get.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -572,14 +558,14 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { FilterProtos.Filter filter = proto.getFilter(); get.setFilter(ProtobufUtil.toFilter(filter)); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { get.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { get.addColumn(family, qualifier.toByteArray()); } } else { @@ -587,7 +573,7 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { } } } - if (proto.hasExistenceOnly() && proto.getExistenceOnly()){ + if (proto.hasExistenceOnly() && proto.getExistenceOnly()) { get.setCheckExistenceOnly(true); } if (proto.hasConsistency()) { @@ -601,58 +587,61 @@ public static Get toGet(final ClientProtos.Get proto) throws IOException { public static Consistency toConsistency(ClientProtos.Consistency consistency) { switch (consistency) { - case STRONG : return Consistency.STRONG; - case TIMELINE : return Consistency.TIMELINE; - default : return Consistency.STRONG; + case STRONG: + return Consistency.STRONG; + case TIMELINE: + return Consistency.TIMELINE; + default: + return Consistency.STRONG; } } public static ClientProtos.Consistency toConsistency(Consistency consistency) { switch (consistency) { - case STRONG : return ClientProtos.Consistency.STRONG; - case TIMELINE : return 
ClientProtos.Consistency.TIMELINE; - default : return ClientProtos.Consistency.STRONG; + case STRONG: + return ClientProtos.Consistency.STRONG; + case TIMELINE: + return ClientProtos.Consistency.TIMELINE; + default: + return ClientProtos.Consistency.STRONG; } } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert * @return A client Put. * @throws IOException */ - public static Put toPut(final MutationProto proto) - throws IOException { + public static Put toPut(final MutationProto proto) throws IOException { return toPut(proto, null); } /** * Convert a protocol buffer Mutate to a Put. - * * @param proto The protocol buffer MutationProto to convert * @param cellScanner If non-null, the Cell data that goes with this proto. * @return A client Put. * @throws IOException */ public static Put toPut(final MutationProto proto, final CellScanner cellScanner) - throws IOException { - // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? + throws IOException { + // TODO: Server-side at least why do we convert back to the Client types? Why not just pb it? MutationType type = proto.getMutateType(); - assert type == MutationType.PUT: type.name(); - long timestamp = proto.hasTimestamp()? proto.getTimestamp(): HConstants.LATEST_TIMESTAMP; + assert type == MutationType.PUT : type.name(); + long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Put put = proto.hasRow() ? new Put(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (put == null) { @@ -665,13 +654,13 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner throw new IllegalArgumentException("row cannot be null"); } // The proto has the metadata and the data itself - ExtendedCellBuilder cellBuilder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (ColumnValue column: proto.getColumnValueList()) { + ExtendedCellBuilder cellBuilder = + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } long ts = timestamp; if (qv.hasTimestamp()) { @@ -680,51 +669,35 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner byte[] allTagsBytes; if (qv.hasTags()) { allTagsBytes = qv.getTags().toByteArray(); - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(proto.getRow().toByteArray()) - .setFamily(family) + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(proto.getRow().toByteArray()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .setTags(allTagsBytes) - .build()); + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()) + .setTags(allTagsBytes).build()); } else { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Cell.Type.Put) + .setTimestamp(ts).setType(Cell.Type.Put) .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null) - .setTags(allTagsBytes) - .build()); + .setTags(allTagsBytes).build()); } } else { - if(qv.hasDeleteType()) { - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) + if (qv.hasDeleteType()) { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(fromDeleteType(qv.getDeleteType()).getCode()) - .build()); - } else{ - put.add(cellBuilder.clear() - .setRow(put.getRow()) - .setFamily(family) + .setTimestamp(ts).setType(fromDeleteType(qv.getDeleteType()).getCode()).build()); + } else { + put.add(cellBuilder.clear().setRow(put.getRow()).setFamily(family) .setQualifier(qv.hasQualifier() ? qv.getQualifier().toByteArray() : null) - .setTimestamp(ts) - .setType(Type.Put) - .setValue(qv.hasValue() ? 
qv.getValue().toByteArray() : null) - .build()); + .setTimestamp(ts).setType(Type.Put) + .setValue(qv.hasValue() ? qv.getValue().toByteArray() : null).build()); } } } } } put.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { put.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return put; @@ -732,48 +705,45 @@ public static Put toPut(final MutationProto proto, final CellScanner cellScanner /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert * @return the converted client Delete * @throws IOException */ - public static Delete toDelete(final MutationProto proto) - throws IOException { + public static Delete toDelete(final MutationProto proto) throws IOException { return toDelete(proto, null); } /** * Convert a protocol buffer Mutate to a Delete - * * @param proto the protocol buffer Mutate to convert * @param cellScanner if non-null, the data that goes with this delete. * @return the converted client Delete * @throws IOException */ public static Delete toDelete(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.DELETE : type.name(); long timestamp = proto.hasTimestamp() ? proto.getTimestamp() : HConstants.LATEST_TIMESTAMP; Delete delete = proto.hasRow() ? new Delete(proto.getRow().toByteArray(), timestamp) : null; - int cellCount = proto.hasAssociatedCellCount()? proto.getAssociatedCellCount(): 0; + int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. if (cellScanner == null) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + + TextFormat.shortDebugString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { // TextFormat should be fine for a Delete since it carries no data, just coordinates. 
- throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + TextFormat.shortDebugString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + TextFormat.shortDebugString(proto)); } Cell cell = cellScanner.current(); if (delete == null) { delete = - new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp); + new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength(), timestamp); } delete.add(cell); } @@ -781,9 +751,9 @@ public static Delete toDelete(final MutationProto proto, final CellScanner cellS if (delete == null) { throw new IllegalArgumentException("row cannot be null"); } - for (ColumnValue column: proto.getColumnValueList()) { + for (ColumnValue column : proto.getColumnValueList()) { byte[] family = column.getFamily().toByteArray(); - for (QualifierValue qv: column.getQualifierValueList()) { + for (QualifierValue qv : column.getQualifierValueList()) { DeleteType deleteType = qv.getDeleteType(); byte[] qualifier = null; if (qv.hasQualifier()) { @@ -803,35 +773,38 @@ public static Delete toDelete(final MutationProto proto, final CellScanner cellS } } delete.setDurability(toDurability(proto.getDurability())); - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { delete.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } return delete; } + @FunctionalInterface - private interface ConsumerWithException { + private interface ConsumerWithException { void accept(T t, U u) throws IOException; } - private static T toDelta(Function supplier, ConsumerWithException consumer, - final MutationProto proto, final CellScanner cellScanner) throws IOException { + private static T toDelta(Function supplier, + ConsumerWithException consumer, final MutationProto proto, + final CellScanner cellScanner) throws IOException { byte[] row = proto.hasRow() ? proto.getRow().toByteArray() : null; T mutation = row == null ? null : supplier.apply(new Bytes(row)); int cellCount = proto.hasAssociatedCellCount() ? proto.getAssociatedCellCount() : 0; if (cellCount > 0) { // The proto has metadata only and the data is separate to be found in the cellScanner. 
if (cellScanner == null) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but no cellScanner: " + - toShortString(proto)); + throw new DoNotRetryIOException( + "Cell count of " + cellCount + " but no cellScanner: " + toShortString(proto)); } for (int i = 0; i < cellCount; i++) { if (!cellScanner.advance()) { - throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + - " no cell returned: " + toShortString(proto)); + throw new DoNotRetryIOException("Cell count of " + cellCount + " but at index " + i + + " no cell returned: " + toShortString(proto)); } Cell cell = cellScanner.current(); if (mutation == null) { - mutation = supplier.apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + mutation = supplier + .apply(new Bytes(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); } consumer.accept(mutation, cell); } @@ -844,23 +817,18 @@ private static T toDelta(Function supplier, Consu for (QualifierValue qv : column.getQualifierValueList()) { byte[] qualifier = qv.getQualifier().toByteArray(); if (!qv.hasValue()) { - throw new DoNotRetryIOException( - "Missing required field: qualifier value"); + throw new DoNotRetryIOException("Missing required field: qualifier value"); } byte[] value = qv.getValue().toByteArray(); byte[] tags = null; if (qv.hasTags()) { tags = qv.getTags().toByteArray(); } - consumer.accept(mutation, ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(mutation.getRow()) - .setFamily(family) - .setQualifier(qualifier) - .setTimestamp(cellTimestampOrLatest(qv)) - .setType(KeyValue.Type.Put.getCode()) - .setValue(value) - .setTags(tags) - .build()); + consumer.accept(mutation, + ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) + .setRow(mutation.getRow()).setFamily(family).setQualifier(qualifier) + .setTimestamp(cellTimestampOrLatest(qv)).setType(KeyValue.Type.Put.getCode()) + .setValue(value).setTags(tags).build()); } } } @@ -887,11 +855,11 @@ private static long cellTimestampOrLatest(QualifierValue cell) { * @throws IOException */ public static Append toAppend(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.APPEND : type.name(); Append append = toDelta((Bytes row) -> new Append(row.get(), row.getOffset(), row.getLength()), - Append::add, proto, cellScanner); + Append::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); append.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -901,17 +869,17 @@ public static Append toAppend(final MutationProto proto, final CellScanner cellS /** * Convert a protocol buffer Mutate to an Increment - * * @param proto the protocol buffer Mutate to convert * @return the converted client Increment * @throws IOException */ public static Increment toIncrement(final MutationProto proto, final CellScanner cellScanner) - throws IOException { + throws IOException { MutationType type = proto.getMutateType(); assert type == MutationType.INCREMENT : type.name(); - Increment increment = toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), - Increment::add, proto, cellScanner); + Increment increment = + toDelta((Bytes row) -> new Increment(row.get(), row.getOffset(), row.getLength()), + Increment::add, proto, cellScanner); if (proto.hasTimeRange()) { TimeRange timeRange = toTimeRange(proto.getTimeRange()); 
increment.setTimeRange(timeRange.getMin(), timeRange.getMax()); @@ -921,7 +889,6 @@ public static Increment toIncrement(final MutationProto proto, final CellScanner /** * Convert a MutateRequest to Mutation - * * @param proto the protocol buffer Mutate to convert * @return the converted Mutation * @throws IOException @@ -971,15 +938,12 @@ public static Scan.ReadType toReadType(ClientProtos.Scan.ReadType readType) { /** * Convert a client Scan to a protocol buffer Scan - * * @param scan the client Scan to convert * @return the converted protocol buffer Scan * @throws IOException */ - public static ClientProtos.Scan toScan( - final Scan scan) throws IOException { - ClientProtos.Scan.Builder scanBuilder = - ClientProtos.Scan.newBuilder(); + public static ClientProtos.Scan toScan(final Scan scan) throws IOException { + ClientProtos.Scan.Builder scanBuilder = ClientProtos.Scan.newBuilder(); scanBuilder.setCacheBlocks(scan.getCacheBlocks()); if (scan.getBatch() > 0) { scanBuilder.setBatchSize(scan.getBatch()); @@ -1000,15 +964,14 @@ public static ClientProtos.Scan toScan( scanBuilder.setMaxVersions(scan.getMaxVersions()); scan.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { scanBuilder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) - .build()); + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) + .build()); }); scanBuilder.setTimeRange(ProtobufUtil.toTimeRange(scan.getTimeRange())); Map attributes = scan.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); scanBuilder.addAttribute(attributeBuilder.build()); @@ -1027,13 +990,12 @@ public static ClientProtos.Scan toScan( } if (scan.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); - for (Map.Entry> - family: scan.getFamilyMap().entrySet()) { + for (Map.Entry> family : scan.getFamilyMap().entrySet()) { columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - NavigableSet qualifiers = family.getValue(); + NavigableSet qualifiers = family.getValue(); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte [] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1074,13 +1036,11 @@ public static ClientProtos.Scan toScan( /** * Convert a protocol buffer Scan to a client Scan - * * @param proto the protocol buffer Scan to convert * @return the converted client Scan * @throws IOException */ - public static Scan toScan( - final ClientProtos.Scan proto) throws IOException { + public static Scan toScan(final ClientProtos.Scan proto) throws IOException { byte[] startRow = HConstants.EMPTY_START_ROW; byte[] stopRow = HConstants.EMPTY_END_ROW; boolean includeStartRow = true; @@ -1122,8 +1082,8 @@ public static Scan toScan( if (proto.getCfTimeRangeCount() > 0) { for (HBaseProtos.ColumnFamilyTimeRange cftr : proto.getCfTimeRangeList()) { TimeRange timeRange = toTimeRange(cftr.getTimeRange()); - scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), - timeRange.getMin(), timeRange.getMax()); + 
scan.setColumnFamilyTimeRange(cftr.getColumnFamily().toByteArray(), timeRange.getMin(), + timeRange.getMax()); } } if (proto.hasTimeRange()) { @@ -1146,14 +1106,14 @@ public static Scan toScan( if (proto.hasAllowPartialResults()) { scan.setAllowPartialResults(proto.getAllowPartialResults()); } - for (NameBytesPair attribute: proto.getAttributeList()) { + for (NameBytesPair attribute : proto.getAttributeList()) { scan.setAttribute(attribute.getName(), attribute.getValue().toByteArray()); } if (proto.getColumnCount() > 0) { - for (Column column: proto.getColumnList()) { + for (Column column : proto.getColumnList()) { byte[] family = column.getFamily().toByteArray(); if (column.getQualifierCount() > 0) { - for (ByteString qualifier: column.getQualifierList()) { + for (ByteString qualifier : column.getQualifierList()) { scan.addColumn(family, qualifier.toByteArray()); } } else { @@ -1202,15 +1162,12 @@ public static Cursor toCursor(ClientProtos.Cursor cursor) { /** * Create a protocol buffer Get based on a client Get. - * * @param get the client Get * @return a protocol buffer Get * @throws IOException */ - public static ClientProtos.Get toGet( - final Get get) throws IOException { - ClientProtos.Get.Builder builder = - ClientProtos.Get.newBuilder(); + public static ClientProtos.Get toGet(final Get get) throws IOException { + ClientProtos.Get.Builder builder = ClientProtos.Get.newBuilder(); builder.setRow(UnsafeByteOperations.unsafeWrap(get.getRow())); builder.setCacheBlocks(get.getCacheBlocks()); builder.setMaxVersions(get.getMaxVersions()); @@ -1219,15 +1176,14 @@ public static ClientProtos.Get toGet( } get.getColumnFamilyTimeRange().forEach((cf, timeRange) -> { builder.addCfTimeRange(HBaseProtos.ColumnFamilyTimeRange.newBuilder() - .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)) - .setTimeRange(toTimeRange(timeRange)) - .build()); + .setColumnFamily(UnsafeByteOperations.unsafeWrap(cf)).setTimeRange(toTimeRange(timeRange)) + .build()); }); builder.setTimeRange(ProtobufUtil.toTimeRange(get.getTimeRange())); Map attributes = get.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1236,12 +1192,12 @@ public static ClientProtos.Get toGet( if (get.hasFamilies()) { Column.Builder columnBuilder = Column.newBuilder(); Map> families = get.getFamilyMap(); - for (Map.Entry> family: families.entrySet()) { + for (Map.Entry> family : families.entrySet()) { NavigableSet qualifiers = family.getValue(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); columnBuilder.clearQualifier(); if (qualifiers != null && qualifiers.size() > 0) { - for (byte[] qualifier: qualifiers) { + for (byte[] qualifier : qualifiers) { columnBuilder.addQualifier(UnsafeByteOperations.unsafeWrap(qualifier)); } } @@ -1254,7 +1210,7 @@ public static ClientProtos.Get toGet( if (get.getRowOffsetPerColumnFamily() > 0) { builder.setStoreOffset(get.getRowOffsetPerColumnFamily()); } - if (get.isCheckExistenceOnly()){ + if (get.isCheckExistenceOnly()) { builder.setExistenceOnly(true); } if (get.getConsistency() != null && get.getConsistency() != Consistency.STRONG) { @@ -1269,20 +1225,19 @@ public static ClientProtos.Get toGet( } public static MutationProto 
toMutation(final MutationType type, final Mutation mutation) - throws IOException { + throws IOException { return toMutation(type, mutation, HConstants.NO_NONCE); } /** * Create a protocol buffer Mutate based on a client Mutation - * * @param type * @param mutation * @return a protobuf'd Mutation * @throws IOException */ public static MutationProto toMutation(final MutationType type, final Mutation mutation, - final long nonce) throws IOException { + final long nonce) throws IOException { return toMutation(type, mutation, MutationProto.newBuilder(), nonce); } @@ -1292,8 +1247,7 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } public static MutationProto toMutation(final MutationType type, final Mutation mutation, - MutationProto.Builder builder, long nonce) - throws IOException { + MutationProto.Builder builder, long nonce) throws IOException { builder = getMutationBuilderAndSetCommonFields(type, mutation, builder); if (nonce != HConstants.NO_NONCE) { builder.setNonce(nonce); @@ -1306,15 +1260,15 @@ public static MutationProto toMutation(final MutationType type, final Mutation m } ColumnValue.Builder columnBuilder = ColumnValue.newBuilder(); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); - for (Map.Entry> family: mutation.getFamilyCellMap().entrySet()) { + for (Map.Entry> family : mutation.getFamilyCellMap().entrySet()) { columnBuilder.clear(); columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family.getKey())); - for (Cell cell: family.getValue()) { + for (Cell cell : family.getValue()) { valueBuilder.clear(); - valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap( - cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength())); - valueBuilder.setValue(UnsafeByteOperations.unsafeWrap( - cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())); + valueBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), + cell.getQualifierOffset(), cell.getQualifierLength())); + valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), + cell.getValueOffset(), cell.getValueLength())); valueBuilder.setTimestamp(cell.getTimestamp()); if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) { KeyValue.Type keyValueType = KeyValue.Type.codeToType(cell.getTypeByte()); @@ -1337,12 +1291,12 @@ public static MutationProto toMutation(final MutationType type, final Mutation m * @throws IOException */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation, - final MutationProto.Builder builder) throws IOException { + final MutationProto.Builder builder) throws IOException { return toMutationNoData(type, mutation, builder, HConstants.NO_NONCE); } /** - * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. + * Create a protocol buffer MutationProto based on a client Mutation. Does NOT include data. * Understanding is that the Cell will be transported other than via protobuf. 
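The Mutation converters in this hunk come in two flavors: toMutation copies qualifiers, values and timestamps into the MutationProto, while toMutationNoData only fills the common fields and leaves the Cells to travel outside protobuf (typically in a cell block). A minimal sketch of the pairing, assuming the shaded ProtobufUtil and generated ClientProtos classes referenced throughout this file:

// Sketch only: converting a client Put with and without cell data.
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class MutationConversionSketch {
  public static void main(String[] args) throws Exception {
    Put put = new Put(Bytes.toBytes("row-1"));
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));

    // Full conversion: the qualifier/value/timestamp data ends up inside the proto.
    MutationProto withData = ProtobufUtil.toMutation(MutationType.PUT, put);
    // "NoData" conversion: no ColumnValue entries; the Cells are expected to be shipped separately.
    MutationProto noData = ProtobufUtil.toMutationNoData(MutationType.PUT, put);

    System.out.println(withData.getColumnValueCount() + " vs " + noData.getColumnValueCount());
  }
}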
* @param type * @param mutation @@ -1350,8 +1304,8 @@ public static MutationProto toMutationNoData(final MutationType type, final Muta * @throws IOException */ public static MutationProto toMutationNoData(final MutationType type, final Mutation mutation) - throws IOException { - MutationProto.Builder builder = MutationProto.newBuilder(); + throws IOException { + MutationProto.Builder builder = MutationProto.newBuilder(); return toMutationNoData(type, mutation, builder); } @@ -1387,7 +1341,7 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final Map attributes = mutation.getAttributesMap(); if (!attributes.isEmpty()) { NameBytesPair.Builder attributeBuilder = NameBytesPair.newBuilder(); - for (Map.Entry attribute: attributes.entrySet()) { + for (Map.Entry attribute : attributes.entrySet()) { attributeBuilder.setName(attribute.getKey()); attributeBuilder.setValue(UnsafeByteOperations.unsafeWrap(attribute.getValue())); builder.addAttribute(attributeBuilder.build()); @@ -1398,7 +1352,6 @@ private static MutationProto.Builder getMutationBuilderAndSetCommonFields(final /** * Convert a client Result to a protocol buffer Result - * * @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1407,14 +1360,13 @@ public static ClientProtos.Result toResult(final Result result) { } /** - * Convert a client Result to a protocol buffer Result + * Convert a client Result to a protocol buffer Result * @param result the client Result to convert - * @param encodeTags whether to includeTags in converted protobuf result or not - * When @encodeTags is set to true, it will return all the tags in the response. - * These tags may contain some sensitive data like acl permissions, etc. - * Only the tools like Export, Import which needs to take backup needs to set - * it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * @param encodeTags whether to includeTags in converted protobuf result or not When @encodeTags + * is set to true, it will return all the tags in the response. These tags may contain + * some sensitive data like acl permissions, etc. Only the tools like Export, Import + * which needs to take backup needs to set it to true so that cell tags are persisted in + * backup. Refer to HBASE-25246 for more context. * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final Result result, boolean encodeTags) { @@ -1440,12 +1392,11 @@ public static ClientProtos.Result toResult(final Result result, boolean encodeTa /** * Convert a client Result to a protocol buffer Result - * * @param existence the client existence to send * @return the converted protocol buffer Result */ public static ClientProtos.Result toResult(final boolean existence, boolean stale) { - if (stale){ + if (stale) { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE_STALE : EMPTY_RESULT_PB_EXISTS_FALSE_STALE; } else { return existence ? EMPTY_RESULT_PB_EXISTS_TRUE : EMPTY_RESULT_PB_EXISTS_FALSE; @@ -1453,9 +1404,8 @@ public static ClientProtos.Result toResult(final boolean existence, boolean stal } /** - * Convert a client Result to a protocol buffer Result. - * The pb Result does not include the Cell data. That is for transport otherwise. - * + * Convert a client Result to a protocol buffer Result. The pb Result does not include the Cell + * data. That is for transport otherwise. 
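The Result converters follow the same shape, with an extra flag controlling whether cell tags are serialized; tags can carry sensitive data such as ACLs, so per the javadoc only backup-style tooling (Export/Import, HBASE-25246) should pass true. A rough round trip, again assuming the shaded class locations:

// Sketch only: client Result -> protobuf Result -> client Result, tags left out.
import java.util.Collections;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;

public class ResultConversionSketch {
  public static void main(String[] args) throws Exception {
    Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(Bytes.toBytes("r")).setFamily(Bytes.toBytes("cf"))
      .setQualifier(Bytes.toBytes("q")).setTimestamp(1L)
      .setType(Cell.Type.Put).setValue(Bytes.toBytes("v")).build();
    Result result = Result.create(Collections.singletonList(cell));

    ClientProtos.Result pb = ProtobufUtil.toResult(result, false); // encodeTags=false
    Result back = ProtobufUtil.toResult(pb, false);                // decodeTags=false
    System.out.println(pb.getCellCount() + " cell(s), round-tripped size=" + back.size());
  }
}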
* @param result the client Result to convert * @return the converted protocol buffer Result */ @@ -1471,7 +1421,6 @@ public static ClientProtos.Result toResultNoData(final Result result) { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @return the converted client Result */ @@ -1481,26 +1430,24 @@ public static Result toResult(final ClientProtos.Result proto) { /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert - * @param decodeTags whether to decode tags into converted client Result - * When @decodeTags is set to true, it will decode all the tags from the - * response. These tags may contain some sensitive data like acl permissions, - * etc. Only the tools like Export, Import which needs to take backup needs to - * set it to true so that cell tags are persisted in backup. - * Refer to HBASE-25246 for more context. + * @param decodeTags whether to decode tags into converted client Result When @decodeTags is set + * to true, it will decode all the tags from the response. These tags may contain some + * sensitive data like acl permissions, etc. Only the tools like Export, Import which + * needs to take backup needs to set it to true so that cell tags are persisted in + * backup. Refer to HBASE-25246 for more context. * @return the converted client Result */ public static Result toResult(final ClientProtos.Result proto, boolean decodeTags) { if (proto.hasExists()) { if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } List values = proto.getCellList(); - if (values.isEmpty()){ + if (values.isEmpty()) { return proto.getStale() ? EMPTY_RESULT_STALE : EMPTY_RESULT; } @@ -1514,23 +1461,22 @@ public static Result toResult(final ClientProtos.Result proto, boolean decodeTag /** * Convert a protocol buffer Result to a client Result - * * @param proto the protocol buffer Result to convert * @param scanner Optional cell scanner. * @return the converted client Result * @throws IOException */ public static Result toResult(final ClientProtos.Result proto, final CellScanner scanner) - throws IOException { + throws IOException { List values = proto.getCellList(); if (proto.hasExists()) { - if ((values != null && !values.isEmpty()) || - (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { + if ((values != null && !values.isEmpty()) + || (proto.hasAssociatedCellCount() && proto.getAssociatedCellCount() > 0)) { throw new IllegalArgumentException("bad proto: exists with cells is no allowed " + proto); } if (proto.getStale()) { - return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE :EMPTY_RESULT_EXISTS_FALSE_STALE; + return proto.getExists() ? EMPTY_RESULT_EXISTS_TRUE_STALE : EMPTY_RESULT_EXISTS_FALSE_STALE; } return proto.getExists() ? 
EMPTY_RESULT_EXISTS_TRUE : EMPTY_RESULT_EXISTS_FALSE; } @@ -1546,10 +1492,10 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner } } - if (!values.isEmpty()){ + if (!values.isEmpty()) { if (cells == null) cells = new ArrayList<>(values.size()); ExtendedCellBuilder builder = ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); - for (CellProtos.Cell c: values) { + for (CellProtos.Cell c : values) { cells.add(toCell(builder, c, false)); } } @@ -1559,10 +1505,8 @@ public static Result toResult(final ClientProtos.Result proto, final CellScanner : Result.create(cells, null, proto.getStale()); } - /** * Convert a ByteArrayComparable to a protocol buffer Comparator - * * @param comparator the ByteArrayComparable to convert * @return the converted protocol buffer Comparator */ @@ -1575,23 +1519,22 @@ public static ComparatorProtos.Comparator toComparator(ByteArrayComparable compa /** * Convert a protocol buffer Comparator to a ByteArrayComparable - * * @param proto the protocol buffer Comparator to convert * @return the converted ByteArrayComparable */ @SuppressWarnings("unchecked") public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto) - throws IOException { + throws IOException { String type = proto.getName(); String funcName = "parseFrom"; - byte [] value = proto.getSerializedComparator().toByteArray(); + byte[] value = proto.getSerializedComparator().toByteArray(); try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Method parseFrom = c.getMethod(funcName, byte[].class); if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (ByteArrayComparable)parseFrom.invoke(null, value); + return (ByteArrayComparable) parseFrom.invoke(null, value); } catch (Exception e) { throw new IOException(e); } @@ -1599,14 +1542,13 @@ public static ByteArrayComparable toComparator(ComparatorProtos.Comparator proto /** * Convert a protocol buffer Filter to a client Filter - * * @param proto the protocol buffer Filter to convert * @return the converted Filter */ @SuppressWarnings("unchecked") public static Filter toFilter(FilterProtos.Filter proto) throws IOException { String type = proto.getName(); - final byte [] value = proto.getSerializedFilter().toByteArray(); + final byte[] value = proto.getSerializedFilter().toByteArray(); String funcName = "parseFrom"; try { Class c = Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); @@ -1614,7 +1556,7 @@ public static Filter toFilter(FilterProtos.Filter proto) throws IOException { if (parseFrom == null) { throw new IOException("Unable to locate function: " + funcName + " in type: " + type); } - return (Filter)parseFrom.invoke(c, value); + return (Filter) parseFrom.invoke(c, value); } catch (Exception e) { // Either we couldn't instantiate the method object, or "parseFrom" failed. // In either case, let's not retry. @@ -1624,7 +1566,6 @@ public static Filter toFilter(FilterProtos.Filter proto) throws IOException { /** * Convert a client Filter to a protocol buffer Filter - * * @param filter the Filter to convert * @return the converted protocol buffer Filter */ @@ -1637,53 +1578,48 @@ public static FilterProtos.Filter toFilter(Filter filter) throws IOException { /** * Convert a delete KeyValue type to protocol buffer DeleteType. 
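The switch that follows maps client-side delete markers onto the protobuf DeleteType enum (for example KeyValue.Type.DeleteColumn becomes DELETE_MULTIPLE_VERSIONS), and fromDeleteType reverses the mapping. A small illustration of the round trip, sketch only:

import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.DeleteType;

public class DeleteTypeSketch {
  public static void main(String[] args) throws Exception {
    // DeleteColumn (all versions of a single column) maps to DELETE_MULTIPLE_VERSIONS.
    DeleteType proto = ProtobufUtil.toDeleteType(KeyValue.Type.DeleteColumn);
    // Converting back recovers the original marker type.
    KeyValue.Type roundTripped = ProtobufUtil.fromDeleteType(proto);
    System.out.println(proto + " <-> " + roundTripped);
  }
}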
- * * @param type * @return protocol buffer DeleteType * @throws IOException */ - public static DeleteType toDeleteType( - KeyValue.Type type) throws IOException { + public static DeleteType toDeleteType(KeyValue.Type type) throws IOException { switch (type) { - case Delete: - return DeleteType.DELETE_ONE_VERSION; - case DeleteColumn: - return DeleteType.DELETE_MULTIPLE_VERSIONS; - case DeleteFamily: - return DeleteType.DELETE_FAMILY; - case DeleteFamilyVersion: - return DeleteType.DELETE_FAMILY_VERSION; - default: + case Delete: + return DeleteType.DELETE_ONE_VERSION; + case DeleteColumn: + return DeleteType.DELETE_MULTIPLE_VERSIONS; + case DeleteFamily: + return DeleteType.DELETE_FAMILY; + case DeleteFamilyVersion: + return DeleteType.DELETE_FAMILY_VERSION; + default: throw new IOException("Unknown delete type: " + type); } } /** * Convert a protocol buffer DeleteType to delete KeyValue type. - * * @param type The DeleteType * @return The type. * @throws IOException */ - public static KeyValue.Type fromDeleteType( - DeleteType type) throws IOException { + public static KeyValue.Type fromDeleteType(DeleteType type) throws IOException { switch (type) { - case DELETE_ONE_VERSION: - return KeyValue.Type.Delete; - case DELETE_MULTIPLE_VERSIONS: - return KeyValue.Type.DeleteColumn; - case DELETE_FAMILY: - return KeyValue.Type.DeleteFamily; - case DELETE_FAMILY_VERSION: - return KeyValue.Type.DeleteFamilyVersion; - default: - throw new IOException("Unknown delete type: " + type); + case DELETE_ONE_VERSION: + return KeyValue.Type.Delete; + case DELETE_MULTIPLE_VERSIONS: + return KeyValue.Type.DeleteColumn; + case DELETE_FAMILY: + return KeyValue.Type.DeleteFamily; + case DELETE_FAMILY_VERSION: + return KeyValue.Type.DeleteFamilyVersion; + default: + throw new IOException("Unknown delete type: " + type); } } /** * Convert a stringified protocol buffer exception Parameter to a Java Exception - * * @param parameter the protocol buffer Parameter to convert * @return the converted Exception * @throws IOException if failed to deserialize the parameter @@ -1695,7 +1631,7 @@ public static Throwable toException(final NameBytesPair parameter) throws IOExce String type = parameter.getName(); try { Class c = - (Class)Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); + (Class) Class.forName(type, true, ClassLoaderHolder.CLASS_LOADER); Constructor cn = null; try { cn = c.getDeclaredConstructor(String.class); @@ -1710,22 +1646,21 @@ public static Throwable toException(final NameBytesPair parameter) throws IOExce } } -// Start helpers for Client + // Start helpers for Client @SuppressWarnings("unchecked") public static T newServiceStub(Class service, RpcChannel channel) throws Exception { - return (T)Methods.call(service, null, "newStub", - new Class[]{ RpcChannel.class }, new Object[]{ channel }); + return (T) Methods.call(service, null, "newStub", new Class[] { RpcChannel.class }, + new Object[] { channel }); } -// End helpers for Client -// Start helpers for Admin + // End helpers for Client + // Start helpers for Admin /** - * A helper to retrieve region info given a region name or an - * encoded region name using admin protocol. - * + * A helper to retrieve region info given a region name or an encoded region name using admin + * protocol. 
* @return the retrieved region info */ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( @@ -1733,10 +1668,12 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( final byte[] regionName) throws IOException { try { GetRegionInfoRequest request = - org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName)? - GetRegionInfoRequest.newBuilder().setRegion(RequestConverter. - buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)).build(): - RequestConverter.buildGetRegionInfoRequest(regionName); + org.apache.hadoop.hbase.client.RegionInfo.isEncodedRegionName(regionName) + ? GetRegionInfoRequest.newBuilder() + .setRegion(RequestConverter + .buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, regionName)) + .build() + : RequestConverter.buildGetRegionInfoRequest(regionName); GetRegionInfoResponse response = admin.getRegionInfo(controller, request); return toRegionInfo(response.getRegionInfo()); } catch (ServiceException se) { @@ -1744,8 +1681,8 @@ public static org.apache.hadoop.hbase.client.RegionInfo getRegionInfo( } } - public static List getRegionLoadInfo( - GetRegionLoadResponse regionLoadResponse) { + public static List + getRegionLoadInfo(GetRegionLoadResponse regionLoadResponse) { List regionLoadList = new ArrayList<>(regionLoadResponse.getRegionLoadsCount()); for (RegionLoad regionLoad : regionLoadResponse.getRegionLoadsList()) { @@ -1755,18 +1692,16 @@ public static List getRegionLoadInfo( } /** - * A helper to close a region given a region name - * using admin protocol. - * + * A helper to close a region given a region name using admin protocol. * @param admin * @param regionName * @throws IOException */ public static void closeRegion(final RpcController controller, final AdminService.BlockingInterface admin, final ServerName server, final byte[] regionName) - throws IOException { + throws IOException { CloseRegionRequest closeRegionRequest = - ProtobufUtil.buildCloseRegionRequest(server, regionName); + ProtobufUtil.buildCloseRegionRequest(server, regionName); try { admin.closeRegion(controller, closeRegionRequest); } catch (ServiceException se) { @@ -1775,19 +1710,17 @@ public static void closeRegion(final RpcController controller, } /** - * A helper to warmup a region given a region name - * using admin protocol - * + * A helper to warmup a region given a region name using admin protocol * @param admin * @param regionInfo - * */ public static void warmupRegion(final RpcController controller, - final AdminService.BlockingInterface admin, final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { + final AdminService.BlockingInterface admin, + final org.apache.hadoop.hbase.client.RegionInfo regionInfo) throws IOException { try { WarmupRegionRequest warmupRegionRequest = - RequestConverter.buildWarmupRegionRequest(regionInfo); + RequestConverter.buildWarmupRegionRequest(regionInfo); admin.warmupRegion(controller, warmupRegionRequest); } catch (ServiceException e) { @@ -1802,10 +1735,9 @@ public static void warmupRegion(final RpcController controller, * @throws IOException */ public static void openRegion(final RpcController controller, - final AdminService.BlockingInterface admin, ServerName server, final org.apache.hadoop.hbase.client.RegionInfo region) - throws IOException { - OpenRegionRequest request = - RequestConverter.buildOpenRegionRequest(server, region, null); + final AdminService.BlockingInterface admin, ServerName server, + final 
org.apache.hadoop.hbase.client.RegionInfo region) throws IOException { + OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, region, null); try { admin.openRegion(controller, request); } catch (ServiceException se) { @@ -1814,26 +1746,23 @@ public static void openRegion(final RpcController controller, } /** - * A helper to get the all the online regions on a region - * server using admin protocol. - * + * A helper to get the all the online regions on a region server using admin protocol. * @param admin * @return a list of online region info * @throws IOException */ - public static List getOnlineRegions(final AdminService.BlockingInterface admin) - throws IOException { + public static List + getOnlineRegions(final AdminService.BlockingInterface admin) throws IOException { return getOnlineRegions(null, admin); } /** - * A helper to get the all the online regions on a region - * server using admin protocol. + * A helper to get the all the online regions on a region server using admin protocol. * @return a list of online region info */ - public static List getOnlineRegions(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + public static List + getOnlineRegions(final RpcController controller, final AdminService.BlockingInterface admin) + throws IOException { GetOnlineRegionRequest request = RequestConverter.buildGetOnlineRegionRequest(); GetOnlineRegionResponse response = null; try { @@ -1846,14 +1775,15 @@ public static List getOnlineRegions(f /** * Get the list of region info from a GetOnlineRegionResponse - * * @param proto the GetOnlineRegionResponse * @return the list of region info or empty if proto is null */ - public static List getRegionInfos(final GetOnlineRegionResponse proto) { + public static List + getRegionInfos(final GetOnlineRegionResponse proto) { if (proto == null) return Collections.EMPTY_LIST; - List regionInfos = new ArrayList<>(proto.getRegionInfoList().size()); - for (RegionInfo regionInfo: proto.getRegionInfoList()) { + List regionInfos = + new ArrayList<>(proto.getRegionInfoList().size()); + for (RegionInfo regionInfo : proto.getRegionInfoList()) { regionInfos.add(toRegionInfo(regionInfo)); } return regionInfos; @@ -1864,8 +1794,7 @@ public static List getRegionInfos(fin * @return the server name */ public static ServerInfo getServerInfo(final RpcController controller, - final AdminService.BlockingInterface admin) - throws IOException { + final AdminService.BlockingInterface admin) throws IOException { GetServerInfoRequest request = RequestConverter.buildGetServerInfoRequest(); try { GetServerInfoResponse response = admin.getServerInfo(controller, request); @@ -1876,28 +1805,22 @@ public static ServerInfo getServerInfo(final RpcController controller, } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. * @return the list of store files */ public static List getStoreFiles(final AdminService.BlockingInterface admin, - final byte[] regionName, final byte[] family) - throws IOException { + final byte[] regionName, final byte[] family) throws IOException { return getStoreFiles(null, admin, regionName, family); } /** - * A helper to get the list of files of a column family - * on a given region using admin protocol. - * + * A helper to get the list of files of a column family on a given region using admin protocol. 
* @return the list of store files */ public static List getStoreFiles(final RpcController controller, final AdminService.BlockingInterface admin, final byte[] regionName, final byte[] family) - throws IOException { - GetStoreFileRequest request = - ProtobufUtil.buildGetStoreFileRequest(regionName, family); + throws IOException { + GetStoreFileRequest request = ProtobufUtil.buildGetStoreFileRequest(regionName, family); try { GetStoreFileResponse response = admin.getStoreFile(controller, request); return response.getStoreFileList(); @@ -1906,7 +1829,7 @@ public static List getStoreFiles(final RpcController controller, } } -// End helpers for Admin + // End helpers for Admin /* * Get the total (read + write) requests from a RegionLoad pb @@ -1921,11 +1844,10 @@ public static long getTotalRequestsCount(RegionLoad rl) { return rl.getReadRequestsCount() + rl.getWriteRequestsCount(); } - /** * @param m Message to get delimited pb serialization of (with pb magic prefix) */ - public static byte [] toDelimitedByteArray(final Message m) throws IOException { + public static byte[] toDelimitedByteArray(final Message m) throws IOException { // Allocate arbitrary big size so we avoid resizing. ByteArrayOutputStream baos = new ByteArrayOutputStream(4096); baos.write(PB_MAGIC); @@ -1935,13 +1857,12 @@ public static long getTotalRequestsCount(RegionLoad rl) { /** * Find the HRegion encoded name based on a region specifier - * * @param regionSpecifier the region specifier * @return the corresponding region's encoded name * @throws DoNotRetryIOException if the specifier type is unsupported */ - public static String getRegionEncodedName( - final RegionSpecifier regionSpecifier) throws DoNotRetryIOException { + public static String getRegionEncodedName(final RegionSpecifier regionSpecifier) + throws DoNotRetryIOException { ByteString value = regionSpecifier.getValue(); RegionSpecifierType type = regionSpecifier.getType(); switch (type) { @@ -1950,8 +1871,7 @@ public static String getRegionEncodedName( case ENCODED_REGION_NAME: return value.toStringUtf8(); default: - throw new DoNotRetryIOException( - "Unsupported region specifier type: " + type); + throw new DoNotRetryIOException("Unsupported region specifier type: " + type); } } @@ -1985,8 +1905,8 @@ public static MapReduceProtos.ScanMetrics toScanMetrics(ScanMetrics scanMetrics, } /** - * Unwraps an exception from a protobuf service into the underlying (expected) IOException. - * This method will always throw an exception. + * Unwraps an exception from a protobuf service into the underlying (expected) IOException. This + * method will always throw an exception. 
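toIOException never returns normally: it rethrows the underlying IOException cause when there is one and otherwise wraps the ServiceException in a fresh IOException, which is why callers can treat the call as a terminal statement. A sketch of the usual calling pattern; the relocated ServiceException package used below is an assumption and may differ between releases:

import java.io.IOException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException;

public class UnwrapSketch {
  static void callAdmin() throws IOException {
    try {
      // Stand-in for an AdminService.BlockingInterface call that failed remotely.
      throw new ServiceException(new IOException("region moved"));
    } catch (ServiceException se) {
      ProtobufUtil.toIOException(se); // always throws
      throw new AssertionError("unreachable");
    }
  }

  public static void main(String[] args) {
    try {
      callAdmin();
    } catch (IOException expected) {
      System.out.println("unwrapped: " + expected.getMessage());
    }
  }
}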
* @param se the {@code ServiceException} instance to convert into an {@code IOException} */ public static void toIOException(ServiceException se) throws IOException { @@ -1996,7 +1916,7 @@ public static void toIOException(ServiceException se) throws IOException { Throwable cause = se.getCause(); if (cause != null && cause instanceof IOException) { - throw (IOException)cause; + throw (IOException) cause; } throw new IOException(se); } @@ -2047,13 +1967,10 @@ private static ByteString wrap(ByteBuffer b, int offset, int length) { } public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell cell, - boolean decodeTags) { - ExtendedCellBuilder builder = cellBuilder.clear() - .setRow(cell.getRow().toByteArray()) - .setFamily(cell.getFamily().toByteArray()) - .setQualifier(cell.getQualifier().toByteArray()) - .setTimestamp(cell.getTimestamp()) - .setType((byte) cell.getCellType().getNumber()) + boolean decodeTags) { + ExtendedCellBuilder builder = cellBuilder.clear().setRow(cell.getRow().toByteArray()) + .setFamily(cell.getFamily().toByteArray()).setQualifier(cell.getQualifier().toByteArray()) + .setTimestamp(cell.getTimestamp()).setType((byte) cell.getCellType().getNumber()) .setValue(cell.getValue().toByteArray()); if (decodeTags && cell.hasTags()) { builder.setTags(cell.getTags().toByteArray()); @@ -2063,12 +1980,10 @@ public static Cell toCell(ExtendedCellBuilder cellBuilder, final CellProtos.Cell public static HBaseProtos.NamespaceDescriptor toProtoNamespaceDescriptor(NamespaceDescriptor ns) { HBaseProtos.NamespaceDescriptor.Builder b = - HBaseProtos.NamespaceDescriptor.newBuilder() - .setName(ByteString.copyFromUtf8(ns.getName())); - for(Map.Entry entry: ns.getConfiguration().entrySet()) { - b.addConfiguration(HBaseProtos.NameStringPair.newBuilder() - .setName(entry.getKey()) - .setValue(entry.getValue())); + HBaseProtos.NamespaceDescriptor.newBuilder().setName(ByteString.copyFromUtf8(ns.getName())); + for (Map.Entry entry : ns.getConfiguration().entrySet()) { + b.addConfiguration( + HBaseProtos.NameStringPair.newBuilder().setName(entry.getKey()).setValue(entry.getValue())); } return b.build(); } @@ -2082,25 +1997,25 @@ public static NamespaceDescriptor toNamespaceDescriptor(HBaseProtos.NamespaceDes } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, - List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] family, List inputPaths, + List outputPaths, Path storeDir) { return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir); } public static CompactionDescriptor toCompactionDescriptor( - org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, - byte[] family, List inputPaths, List outputPaths, Path storeDir) { + org.apache.hadoop.hbase.client.RegionInfo info, byte[] regionName, byte[] family, + List inputPaths, List outputPaths, Path storeDir) { // compaction descriptor contains relative paths. // input / output paths are relative to the store dir // store dir is relative to region dir CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder() .setTableName(UnsafeByteOperations.unsafeWrap(info.getTable().toBytes())) - .setEncodedRegionName(UnsafeByteOperations.unsafeWrap( - regionName == null ? 
info.getEncodedNameAsBytes() : regionName)) - .setFamilyName(UnsafeByteOperations.unsafeWrap(family)) - .setStoreHomeDir(storeDir.getName()); //make relative + .setEncodedRegionName(UnsafeByteOperations + .unsafeWrap(regionName == null ? info.getEncodedNameAsBytes() : regionName)) + .setFamilyName(UnsafeByteOperations.unsafeWrap(family)).setStoreHomeDir(storeDir.getName()); // make + // relative for (Path inputPath : inputPaths) { - builder.addCompactionInput(inputPath.getName()); //relative path + builder.addCompactionInput(inputPath.getName()); // relative path } for (Path outputPath : outputPaths) { builder.addCompactionOutput(outputPath.getName()); @@ -2109,10 +2024,10 @@ public static CompactionDescriptor toCompactionDescriptor( return builder.build(); } - public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.hadoop.hbase.client.RegionInfo hri, - long flushSeqId, Map> committedFiles) { - FlushDescriptor.Builder desc = FlushDescriptor.newBuilder() - .setAction(action) + public static FlushDescriptor toFlushDescriptor(FlushAction action, + org.apache.hadoop.hbase.client.RegionInfo hri, long flushSeqId, + Map> committedFiles) { + FlushDescriptor.Builder desc = FlushDescriptor.newBuilder().setAction(action) .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(hri.getEncodedNameAsBytes())) .setRegionName(UnsafeByteOperations.unsafeWrap(hri.getRegionName())) .setFlushSequenceNumber(flushSeqId) @@ -2121,8 +2036,8 @@ public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.h for (Map.Entry> entry : committedFiles.entrySet()) { WALProtos.FlushDescriptor.StoreFlushDescriptor.Builder builder = WALProtos.FlushDescriptor.StoreFlushDescriptor.newBuilder() - .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) - .setStoreHomeDir(Bytes.toString(entry.getKey())); //relative to region + .setFamilyName(UnsafeByteOperations.unsafeWrap(entry.getKey())) + .setStoreHomeDir(Bytes.toString(entry.getKey())); // relative to region if (entry.getValue() != null) { for (Path path : entry.getValue()) { builder.addFlushOutput(path.getName()); @@ -2133,37 +2048,27 @@ public static FlushDescriptor toFlushDescriptor(FlushAction action, org.apache.h return desc.build(); } - public static RegionEventDescriptor toRegionEventDescriptor( - EventType eventType, org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, + public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, + org.apache.hadoop.hbase.client.RegionInfo hri, long seqId, ServerName server, Map> storeFiles) { final byte[] tableNameAsBytes = hri.getTable().getName(); final byte[] encodedNameAsBytes = hri.getEncodedNameAsBytes(); final byte[] regionNameAsBytes = hri.getRegionName(); - return toRegionEventDescriptor(eventType, - tableNameAsBytes, - encodedNameAsBytes, - regionNameAsBytes, - seqId, + return toRegionEventDescriptor(eventType, tableNameAsBytes, encodedNameAsBytes, + regionNameAsBytes, seqId, - server, - storeFiles); + server, storeFiles); } public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, - byte[] tableNameAsBytes, - byte[] encodedNameAsBytes, - byte[] regionNameAsBytes, - long seqId, - - ServerName server, - Map> storeFiles) { - RegionEventDescriptor.Builder desc = RegionEventDescriptor.newBuilder() - .setEventType(eventType) + byte[] tableNameAsBytes, byte[] encodedNameAsBytes, byte[] regionNameAsBytes, long seqId, + + ServerName server, Map> storeFiles) { + RegionEventDescriptor.Builder desc = 
RegionEventDescriptor.newBuilder().setEventType(eventType) .setTableName(UnsafeByteOperations.unsafeWrap(tableNameAsBytes)) .setEncodedRegionName(UnsafeByteOperations.unsafeWrap(encodedNameAsBytes)) .setRegionName(UnsafeByteOperations.unsafeWrap(regionNameAsBytes)) - .setLogSequenceNumber(seqId) - .setServer(toServerName(server)); + .setLogSequenceNumber(seqId).setServer(toServerName(server)); for (Entry> entry : storeFiles.entrySet()) { StoreDescriptor.Builder builder = StoreDescriptor.newBuilder() @@ -2179,55 +2084,54 @@ public static RegionEventDescriptor toRegionEventDescriptor(EventType eventType, } /** - * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. - * Tries to NOT print out data both because it can be big but also so we do not have data in our - * logs. Use judiciously. + * Return short version of Message toString'd, shorter than TextFormat#shortDebugString. Tries to + * NOT print out data both because it can be big but also so we do not have data in our logs. Use + * judiciously. * @param m * @return toString of passed m */ public static String getShortTextFormat(Message m) { if (m == null) return "null"; if (m instanceof ScanRequest) { - // This should be small and safe to output. No data. + // This should be small and safe to output. No data. return TextFormat.shortDebugString(m); } else if (m instanceof RegionServerReportRequest) { // Print a short message only, just the servername and the requests, not the full load. - RegionServerReportRequest r = (RegionServerReportRequest)m; - return "server " + TextFormat.shortDebugString(r.getServer()) + - " load { numberOfRequests: " + r.getLoad().getNumberOfRequests() + " }"; + RegionServerReportRequest r = (RegionServerReportRequest) m; + return "server " + TextFormat.shortDebugString(r.getServer()) + " load { numberOfRequests: " + + r.getLoad().getNumberOfRequests() + " }"; } else if (m instanceof RegionServerStartupRequest) { // Should be small enough. return TextFormat.shortDebugString(m); } else if (m instanceof MutationProto) { - return toShortString((MutationProto)m); + return toShortString((MutationProto) m); } else if (m instanceof GetRequest) { GetRequest r = (GetRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getGet().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getGet().getRow()); } else if (m instanceof ClientProtos.MultiRequest) { ClientProtos.MultiRequest r = (ClientProtos.MultiRequest) m; // Get the number of Actions - int actionsCount = r.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = r.getRegionActionList().stream() + .mapToInt(ClientProtos.RegionAction::getActionCount).sum(); // Get first set of Actions. ClientProtos.RegionAction actions = r.getRegionActionList().get(0); - String row = actions.getActionCount() <= 0? "": - getStringForByteString(actions.getAction(0).hasGet()? - actions.getAction(0).getGet().getRow(): - actions.getAction(0).getMutation().getRow()); - return "region= " + getStringForByteString(actions.getRegion().getValue()) + - ", for " + actionsCount + " action(s) and 1st row key=" + row; + String row = actions.getActionCount() <= 0 ? "" + : getStringForByteString( + actions.getAction(0).hasGet() ? 
actions.getAction(0).getGet().getRow() + : actions.getAction(0).getMutation().getRow()); + return "region= " + getStringForByteString(actions.getRegion().getValue()) + ", for " + + actionsCount + " action(s) and 1st row key=" + row; } else if (m instanceof ClientProtos.MutateRequest) { ClientProtos.MutateRequest r = (ClientProtos.MutateRequest) m; - return "region= " + getStringForByteString(r.getRegion().getValue()) + - ", row=" + getStringForByteString(r.getMutation().getRow()); + return "region= " + getStringForByteString(r.getRegion().getValue()) + ", row=" + + getStringForByteString(r.getMutation().getRow()); } else if (m instanceof ClientProtos.CoprocessorServiceRequest) { ClientProtos.CoprocessorServiceRequest r = (ClientProtos.CoprocessorServiceRequest) m; - return "coprocessorService= " + r.getCall().getServiceName() + ":" + r.getCall().getMethodName(); + return "coprocessorService= " + r.getCall().getServiceName() + ":" + + r.getCall().getMethodName(); } return "TODO: " + m.getClass().toString(); } @@ -2238,7 +2142,6 @@ private static String getStringForByteString(ByteString bs) { /** * Return SlowLogParams to maintain recent online slowlog responses - * * @param message Message object {@link Message} * @return SlowLogParams with regionName(for filter queries) and params */ @@ -2259,14 +2162,12 @@ public static SlowLogParams getSlowLogParams(Message message) { GetRequest getRequest = (GetRequest) message; String regionName = getStringForByteString(getRequest.getRegion().getValue()); String params = "region= " + regionName + ", row= " - + getStringForByteString(getRequest.getGet().getRow()); + + getStringForByteString(getRequest.getGet().getRow()); return new SlowLogParams(regionName, params); } else if (message instanceof MultiRequest) { MultiRequest multiRequest = (MultiRequest) message; - int actionsCount = multiRequest.getRegionActionList() - .stream() - .mapToInt(ClientProtos.RegionAction::getActionCount) - .sum(); + int actionsCount = multiRequest.getRegionActionList().stream() + .mapToInt(ClientProtos.RegionAction::getActionCount).sum(); RegionAction actions = multiRequest.getRegionActionList().get(0); String regionName = getStringForByteString(actions.getRegion().getValue()); String params = "region= " + regionName + ", for " + actionsCount + " action(s)"; @@ -2278,9 +2179,8 @@ public static SlowLogParams getSlowLogParams(Message message) { return new SlowLogParams(regionName, params); } else if (message instanceof CoprocessorServiceRequest) { CoprocessorServiceRequest coprocessorServiceRequest = (CoprocessorServiceRequest) message; - String params = "coprocessorService= " - + coprocessorServiceRequest.getCall().getServiceName() - + ":" + coprocessorServiceRequest.getCall().getMethodName(); + String params = "coprocessorService= " + coprocessorServiceRequest.getCall().getServiceName() + + ":" + coprocessorServiceRequest.getCall().getMethodName(); return new SlowLogParams(params); } String params = message.getClass().toString(); @@ -2293,13 +2193,13 @@ public static SlowLogParams getSlowLogParams(Message message) { * @return Short String of mutation proto */ static String toShortString(final MutationProto proto) { - return "row=" + Bytes.toString(proto.getRow().toByteArray()) + - ", type=" + proto.getMutateType().toString(); + return "row=" + Bytes.toString(proto.getRow().toByteArray()) + ", type=" + + proto.getMutateType().toString(); } public static TableName toTableName(HBaseProtos.TableName tableNamePB) { return 
TableName.valueOf(tableNamePB.getNamespace().asReadOnlyByteBuffer(), - tableNamePB.getQualifier().asReadOnlyByteBuffer()); + tableNamePB.getQualifier().asReadOnlyByteBuffer()); } public static HBaseProtos.TableName toProtoTableName(TableName tableName) { @@ -2335,7 +2235,6 @@ public static TableName[] getTableNameArray(List tableNam /** * Convert a protocol buffer CellVisibility to a client CellVisibility - * * @param proto * @return the converted client CellVisibility */ @@ -2346,7 +2245,6 @@ public static CellVisibility toCellVisibility(ClientProtos.CellVisibility proto) /** * Convert a protocol buffer CellVisibility bytes to a client CellVisibility - * * @param protoBytes * @return the converted client CellVisibility * @throws DeserializationException @@ -2366,7 +2264,6 @@ public static CellVisibility toCellVisibility(byte[] protoBytes) throws Deserial /** * Create a protocol buffer CellVisibility based on a client CellVisibility. - * * @param cellVisibility * @return a protocol buffer CellVisibility */ @@ -2378,7 +2275,6 @@ public static ClientProtos.CellVisibility toCellVisibility(CellVisibility cellVi /** * Convert a protocol buffer Authorizations to a client Authorizations - * * @param proto * @return the converted client Authorizations */ @@ -2389,7 +2285,6 @@ public static Authorizations toAuthorizations(ClientProtos.Authorizations proto) /** * Convert a protocol buffer Authorizations bytes to a client Authorizations - * * @param protoBytes * @return the converted client Authorizations * @throws DeserializationException @@ -2409,7 +2304,6 @@ public static Authorizations toAuthorizations(byte[] protoBytes) throws Deserial /** * Create a protocol buffer Authorizations based on a client Authorizations. - * * @param authorizations * @return a protocol buffer Authorizations */ @@ -2423,45 +2317,56 @@ public static ClientProtos.Authorizations toAuthorizations(Authorizations author /** * Convert a protocol buffer TimeUnit to a client TimeUnit - * * @param proto * @return the converted client TimeUnit */ public static TimeUnit toTimeUnit(final HBaseProtos.TimeUnit proto) { switch (proto) { - case NANOSECONDS: return TimeUnit.NANOSECONDS; - case MICROSECONDS: return TimeUnit.MICROSECONDS; - case MILLISECONDS: return TimeUnit.MILLISECONDS; - case SECONDS: return TimeUnit.SECONDS; - case MINUTES: return TimeUnit.MINUTES; - case HOURS: return TimeUnit.HOURS; - case DAYS: return TimeUnit.DAYS; + case NANOSECONDS: + return TimeUnit.NANOSECONDS; + case MICROSECONDS: + return TimeUnit.MICROSECONDS; + case MILLISECONDS: + return TimeUnit.MILLISECONDS; + case SECONDS: + return TimeUnit.SECONDS; + case MINUTES: + return TimeUnit.MINUTES; + case HOURS: + return TimeUnit.HOURS; + case DAYS: + return TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + proto); } /** * Convert a client TimeUnit to a protocol buffer TimeUnit - * * @param timeUnit * @return the converted protocol buffer TimeUnit */ public static HBaseProtos.TimeUnit toProtoTimeUnit(final TimeUnit timeUnit) { switch (timeUnit) { - case NANOSECONDS: return HBaseProtos.TimeUnit.NANOSECONDS; - case MICROSECONDS: return HBaseProtos.TimeUnit.MICROSECONDS; - case MILLISECONDS: return HBaseProtos.TimeUnit.MILLISECONDS; - case SECONDS: return HBaseProtos.TimeUnit.SECONDS; - case MINUTES: return HBaseProtos.TimeUnit.MINUTES; - case HOURS: return HBaseProtos.TimeUnit.HOURS; - case DAYS: return HBaseProtos.TimeUnit.DAYS; + case NANOSECONDS: + return HBaseProtos.TimeUnit.NANOSECONDS; + case MICROSECONDS: + return 
HBaseProtos.TimeUnit.MICROSECONDS; + case MILLISECONDS: + return HBaseProtos.TimeUnit.MILLISECONDS; + case SECONDS: + return HBaseProtos.TimeUnit.SECONDS; + case MINUTES: + return HBaseProtos.TimeUnit.MINUTES; + case HOURS: + return HBaseProtos.TimeUnit.HOURS; + case DAYS: + return HBaseProtos.TimeUnit.DAYS; } throw new RuntimeException("Invalid TimeUnit " + timeUnit); } /** * Convert a protocol buffer ThrottleType to a client ThrottleType - * * @param proto * @return the converted client ThrottleType */ @@ -2492,7 +2397,6 @@ public static ThrottleType toThrottleType(final QuotaProtos.ThrottleType proto) /** * Convert a client ThrottleType to a protocol buffer ThrottleType - * * @param type * @return the converted protocol buffer ThrottleType */ @@ -2523,97 +2427,107 @@ public static QuotaProtos.ThrottleType toProtoThrottleType(final ThrottleType ty /** * Convert a protocol buffer QuotaScope to a client QuotaScope - * * @param proto * @return the converted client QuotaScope */ public static QuotaScope toQuotaScope(final QuotaProtos.QuotaScope proto) { switch (proto) { - case CLUSTER: return QuotaScope.CLUSTER; - case MACHINE: return QuotaScope.MACHINE; + case CLUSTER: + return QuotaScope.CLUSTER; + case MACHINE: + return QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + proto); } /** * Convert a client QuotaScope to a protocol buffer QuotaScope - * * @param scope * @return the converted protocol buffer QuotaScope */ public static QuotaProtos.QuotaScope toProtoQuotaScope(final QuotaScope scope) { switch (scope) { - case CLUSTER: return QuotaProtos.QuotaScope.CLUSTER; - case MACHINE: return QuotaProtos.QuotaScope.MACHINE; + case CLUSTER: + return QuotaProtos.QuotaScope.CLUSTER; + case MACHINE: + return QuotaProtos.QuotaScope.MACHINE; } throw new RuntimeException("Invalid QuotaScope " + scope); } /** * Convert a protocol buffer QuotaType to a client QuotaType - * * @param proto * @return the converted client QuotaType */ public static QuotaType toQuotaScope(final QuotaProtos.QuotaType proto) { switch (proto) { - case THROTTLE: return QuotaType.THROTTLE; - case SPACE: return QuotaType.SPACE; + case THROTTLE: + return QuotaType.THROTTLE; + case SPACE: + return QuotaType.SPACE; } throw new RuntimeException("Invalid QuotaType " + proto); } /** * Convert a client QuotaType to a protocol buffer QuotaType - * * @param type * @return the converted protocol buffer QuotaType */ public static QuotaProtos.QuotaType toProtoQuotaScope(final QuotaType type) { switch (type) { - case THROTTLE: return QuotaProtos.QuotaType.THROTTLE; - case SPACE: return QuotaProtos.QuotaType.SPACE; - default: throw new RuntimeException("Invalid QuotaType " + type); + case THROTTLE: + return QuotaProtos.QuotaType.THROTTLE; + case SPACE: + return QuotaProtos.QuotaType.SPACE; + default: + throw new RuntimeException("Invalid QuotaType " + type); } } /** * Converts a protocol buffer SpaceViolationPolicy to a client SpaceViolationPolicy. - * * @param proto The protocol buffer space violation policy. * @return The corresponding client SpaceViolationPolicy. 
*/ - public static SpaceViolationPolicy toViolationPolicy( - final QuotaProtos.SpaceViolationPolicy proto) { + public static SpaceViolationPolicy + toViolationPolicy(final QuotaProtos.SpaceViolationPolicy proto) { switch (proto) { - case DISABLE: return SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + proto); } /** * Converts a client SpaceViolationPolicy to a protocol buffer SpaceViolationPolicy. - * * @param policy The client SpaceViolationPolicy object. * @return The corresponding protocol buffer SpaceViolationPolicy. */ - public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( - final SpaceViolationPolicy policy) { + public static QuotaProtos.SpaceViolationPolicy + toProtoViolationPolicy(final SpaceViolationPolicy policy) { switch (policy) { - case DISABLE: return QuotaProtos.SpaceViolationPolicy.DISABLE; - case NO_WRITES_COMPACTIONS: return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; - case NO_WRITES: return QuotaProtos.SpaceViolationPolicy.NO_WRITES; - case NO_INSERTS: return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; + case DISABLE: + return QuotaProtos.SpaceViolationPolicy.DISABLE; + case NO_WRITES_COMPACTIONS: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES_COMPACTIONS; + case NO_WRITES: + return QuotaProtos.SpaceViolationPolicy.NO_WRITES; + case NO_INSERTS: + return QuotaProtos.SpaceViolationPolicy.NO_INSERTS; } throw new RuntimeException("Invalid SpaceViolationPolicy " + policy); } /** * Build a protocol buffer TimedQuota - * * @param limit the allowed number of request/data per timeUnit * @param timeUnit the limit time unit * @param scope the quota scope @@ -2621,58 +2535,48 @@ public static QuotaProtos.SpaceViolationPolicy toProtoViolationPolicy( */ public static QuotaProtos.TimedQuota toTimedQuota(final long limit, final TimeUnit timeUnit, final QuotaScope scope) { - return QuotaProtos.TimedQuota.newBuilder() - .setSoftLimit(limit) - .setTimeUnit(toProtoTimeUnit(timeUnit)) - .setScope(toProtoQuotaScope(scope)) - .build(); + return QuotaProtos.TimedQuota.newBuilder().setSoftLimit(limit) + .setTimeUnit(toProtoTimeUnit(timeUnit)).setScope(toProtoQuotaScope(scope)).build(); } /** * Builds a protocol buffer SpaceQuota. - * * @param limit The maximum space usage for the quota in bytes. * @param violationPolicy The policy to apply when the quota is violated. * @return The protocol buffer SpaceQuota. */ - public static QuotaProtos.SpaceQuota toProtoSpaceQuota( - final long limit, final SpaceViolationPolicy violationPolicy) { - return QuotaProtos.SpaceQuota.newBuilder() - .setSoftLimit(limit) - .setViolationPolicy(toProtoViolationPolicy(violationPolicy)) - .build(); + public static QuotaProtos.SpaceQuota toProtoSpaceQuota(final long limit, + final SpaceViolationPolicy violationPolicy) { + return QuotaProtos.SpaceQuota.newBuilder().setSoftLimit(limit) + .setViolationPolicy(toProtoViolationPolicy(violationPolicy)).build(); } /** - * Generates a marker for the WAL so that we propagate the notion of a bulk region load - * throughout the WAL. 
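The quota helpers reformatted just above (toTimedQuota, toProtoSpaceQuota and the enum converters around them) are thin wrappers over the generated QuotaProtos builders. A short illustration, with the shaded import locations assumed rather than taken from the patch:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.quotas.QuotaScope;
import org.apache.hadoop.hbase.quotas.SpaceViolationPolicy;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos;

public class QuotaProtoSketch {
  public static void main(String[] args) {
    // 100 requests per second, enforced per machine.
    QuotaProtos.TimedQuota timed =
      ProtobufUtil.toTimedQuota(100, TimeUnit.SECONDS, QuotaScope.MACHINE);
    // A 1 GB space quota that rejects writes once exceeded.
    QuotaProtos.SpaceQuota space =
      ProtobufUtil.toProtoSpaceQuota(1024L * 1024 * 1024, SpaceViolationPolicy.NO_WRITES);
    System.out.println(timed.getSoftLimit() + " req/s, policy=" + space.getViolationPolicy());
  }
}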
- * - * @param tableName The tableName into which the bulk load is being imported into. + * Generates a marker for the WAL so that we propagate the notion of a bulk region load throughout + * the WAL. + * @param tableName The tableName into which the bulk load is being imported into. * @param encodedRegionName Encoded region name of the region which is being bulk loaded. - * @param storeFiles A set of store files of a column family are bulk loaded. - * @param storeFilesSize Map of store files and their lengths - * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile - * name + * @param storeFiles A set of store files of a column family are bulk loaded. + * @param storeFilesSize Map of store files and their lengths + * @param bulkloadSeqId sequence ID (by a force flush) used to create bulk load hfile name * @return The WAL log marker for bulk loads. */ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, - ByteString encodedRegionName, Map> storeFiles, - Map storeFilesSize, long bulkloadSeqId) { - return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, - storeFilesSize, bulkloadSeqId, null, true); + ByteString encodedRegionName, Map> storeFiles, + Map storeFilesSize, long bulkloadSeqId) { + return toBulkLoadDescriptor(tableName, encodedRegionName, storeFiles, storeFilesSize, + bulkloadSeqId, null, true); } public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableName, ByteString encodedRegionName, Map> storeFiles, - Map storeFilesSize, long bulkloadSeqId, - List clusterIds, boolean replicate) { + Map storeFilesSize, long bulkloadSeqId, List clusterIds, + boolean replicate) { BulkLoadDescriptor.Builder desc = - BulkLoadDescriptor.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .setEncodedRegionName(encodedRegionName) - .setBulkloadSeqNum(bulkloadSeqId) - .setReplicate(replicate); - if(clusterIds != null) { + BulkLoadDescriptor.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .setEncodedRegionName(encodedRegionName).setBulkloadSeqNum(bulkloadSeqId) + .setReplicate(replicate); + if (clusterIds != null) { desc.addAllClusterIds(clusterIds); } @@ -2700,7 +2604,7 @@ public static WALProtos.BulkLoadDescriptor toBulkLoadDescriptor(TableName tableN * @throws IOException */ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) - throws IOException { + throws IOException { // This used to be builder.mergeDelimitedFrom(in); // but is replaced to allow us to bump the protobuf size limit. 
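The mergeFrom and mergeDelimitedFrom overloads in this stretch all exist for one reason: they route parsing through a CodedInputStream whose size limit is raised to Integer.MAX_VALUE, side-stepping protobuf's default 64MB decode limit. A minimal round trip through the byte-array overload (sketch only, shaded class paths assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;

public class MergeFromSketch {
  public static void main(String[] args) throws Exception {
    HBaseProtos.TableName proto = ProtobufUtil.toProtoTableName(TableName.valueOf("t1"));
    byte[] bytes = proto.toByteArray();

    // Parse the same bytes back without tripping the default size limit.
    HBaseProtos.TableName.Builder builder = HBaseProtos.TableName.newBuilder();
    ProtobufUtil.mergeFrom(builder, bytes);
    System.out.println(ProtobufUtil.toTableName(builder.build()));
  }
}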
final int firstByte = in.read(); @@ -2715,8 +2619,8 @@ public static void mergeDelimitedFrom(Message.Builder builder, InputStream in) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is known * @param builder current message builder * @param in InputStream containing protobuf data * @param size known size of protobuf data @@ -2731,14 +2635,13 @@ public static void mergeFrom(Message.Builder builder, InputStream in, int size) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers where the message size is not known + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers + * where the message size is not known * @param builder current message builder * @param in InputStream containing protobuf data * @throws IOException */ - public static void mergeFrom(Message.Builder builder, InputStream in) - throws IOException { + public static void mergeFrom(Message.Builder builder, InputStream in) throws IOException { final CodedInputStream codedInput = CodedInputStream.newInstance(in); codedInput.setSizeLimit(Integer.MAX_VALUE); builder.mergeFrom(codedInput); @@ -2746,8 +2649,8 @@ public static void mergeFrom(Message.Builder builder, InputStream in) } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with ByteStrings + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with ByteStrings * @param builder current message builder * @param bs ByteString containing the * @throws IOException @@ -2760,8 +2663,8 @@ public static void mergeFrom(Message.Builder builder, ByteString bs) throws IOEx } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder * @param b byte array * @throws IOException @@ -2774,8 +2677,8 @@ public static void mergeFrom(Message.Builder builder, byte[] b) throws IOExcepti } /** - * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding - * buffers when working with byte arrays + * This version of protobuf's mergeFrom avoids the hard-coded 64MB limit for decoding buffers when + * working with byte arrays * @param builder current message builder * @param b byte array * @param offset @@ -2803,37 +2706,32 @@ public static void mergeFrom(Message.Builder builder, CodedInputStream codedInpu codedInput.setSizeLimit(prevLimit); } - public static ReplicationLoadSink toReplicationLoadSink( - ClusterStatusProtos.ReplicationLoadSink rls) { + public static ReplicationLoadSink + toReplicationLoadSink(ClusterStatusProtos.ReplicationLoadSink rls) { ReplicationLoadSink.ReplicationLoadSinkBuilder builder = ReplicationLoadSink.newBuilder(); - builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()). - setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()). - setTimestampStarted(rls.hasTimestampStarted()? rls.getTimestampStarted(): -1L). - setTotalOpsProcessed(rls.hasTotalOpsProcessed()? 
rls.getTotalOpsProcessed(): -1L); + builder.setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) + .setTimestampsOfLastAppliedOp(rls.getTimeStampsOfLastAppliedOp()) + .setTimestampStarted(rls.hasTimestampStarted() ? rls.getTimestampStarted() : -1L) + .setTotalOpsProcessed(rls.hasTotalOpsProcessed() ? rls.getTotalOpsProcessed() : -1L); return builder.build(); } - public static ReplicationLoadSource toReplicationLoadSource( - ClusterStatusProtos.ReplicationLoadSource rls) { + public static ReplicationLoadSource + toReplicationLoadSource(ClusterStatusProtos.ReplicationLoadSource rls) { ReplicationLoadSource.ReplicationLoadSourceBuilder builder = ReplicationLoadSource.newBuilder(); - builder.setPeerID(rls.getPeerID()). - setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()). - setSizeOfLogQueue(rls.getSizeOfLogQueue()). - setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()). - setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()). - setReplicationLag(rls.getReplicationLag()). - setQueueId(rls.getQueueId()). - setRecovered(rls.getRecovered()). - setRunning(rls.getRunning()). - setEditsSinceRestart(rls.getEditsSinceRestart()). - setEditsRead(rls.getEditsRead()). - setoPsShipped(rls.getOPsShipped()); + builder.setPeerID(rls.getPeerID()).setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) + .setSizeOfLogQueue(rls.getSizeOfLogQueue()) + .setTimestampOfLastShippedOp(rls.getTimeStampOfLastShippedOp()) + .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.getRecovered()).setRunning(rls.getRunning()) + .setEditsSinceRestart(rls.getEditsSinceRestart()).setEditsRead(rls.getEditsRead()) + .setoPsShipped(rls.getOPsShipped()); return builder.build(); } /** * Get a protocol buffer VersionInfo - * * @return the converted protocol buffer VersionInfo */ public static HBaseProtos.VersionInfo getVersionInfo() { @@ -2861,7 +2759,7 @@ public static HBaseProtos.VersionInfo getVersionInfo() { public static List toSecurityCapabilityList( List capabilities) { List scList = new ArrayList<>(capabilities.size()); - for (MasterProtos.SecurityCapabilitiesResponse.Capability c: capabilities) { + for (MasterProtos.SecurityCapabilitiesResponse.Capability c : capabilities) { try { scList.add(SecurityCapability.valueOf(c.getNumber())); } catch (IllegalArgumentException e) { @@ -2873,10 +2771,9 @@ public static List toSecurityCapabilityList( } public static TimeRange toTimeRange(HBaseProtos.TimeRange timeRange) { - return timeRange == null ? - TimeRange.allTime() : - new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, - timeRange.hasTo() ? timeRange.getTo() : Long.MAX_VALUE); + return timeRange == null ? TimeRange.allTime() + : new TimeRange(timeRange.hasFrom() ? timeRange.getFrom() : 0, + timeRange.hasTo() ? timeRange.getTo() : Long.MAX_VALUE); } /** @@ -2909,11 +2806,12 @@ public static ColumnFamilySchema toColumnFamilySchema(ColumnFamilyDescriptor hcd */ public static ColumnFamilyDescriptor toColumnFamilyDescriptor(final ColumnFamilySchema cfs) { // Use the empty constructor so we preserve the initial values set on construction for things - // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for + // like maxVersion. Otherwise, we pick up wrong values on deserialization which makes for // unrelated-looking test failures that are hard to trace back to here. 
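The comment above is worth keeping in mind when using these schema converters: the column family conversion starts from a plain newBuilder so per-family defaults such as maxVersions come from the serialized attributes instead of being silently reset. A quick round trip through TableSchema (sketch only, shaded paths assumed):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.TableSchema;
import org.apache.hadoop.hbase.util.Bytes;

public class TableSchemaSketch {
  public static void main(String[] args) {
    TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(3).build())
      .build();

    TableSchema schema = ProtobufUtil.toTableSchema(htd);
    TableDescriptor back = ProtobufUtil.toTableDescriptor(schema);
    // Family settings should survive the round trip.
    System.out.println(back.getColumnFamily(Bytes.toBytes("cf")).getMaxVersions());
  }
}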
- ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); - cfs.getAttributesList().forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(cfs.getName().toByteArray()); + cfs.getAttributesList() + .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); cfs.getConfigurationList().forEach(a -> builder.setConfiguration(a.getName(), a.getValue())); return builder.build(); } @@ -2944,16 +2842,13 @@ public static TableSchema toTableSchema(TableDescriptor htd) { * @return An {@link TableDescriptor} made from the passed in pb ts. */ public static TableDescriptor toTableDescriptor(final TableSchema ts) { - TableDescriptorBuilder builder - = TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); - ts.getColumnFamiliesList() - .stream() - .map(ProtobufUtil::toColumnFamilyDescriptor) - .forEach(builder::setColumnFamily); + TableDescriptorBuilder builder = + TableDescriptorBuilder.newBuilder(ProtobufUtil.toTableName(ts.getTableName())); + ts.getColumnFamiliesList().stream().map(ProtobufUtil::toColumnFamilyDescriptor) + .forEach(builder::setColumnFamily); ts.getAttributesList() - .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); - ts.getConfigurationList() - .forEach(a -> builder.setValue(a.getName(), a.getValue())); + .forEach(a -> builder.setValue(a.getFirst().toByteArray(), a.getSecond().toByteArray())); + ts.getConfigurationList().forEach(a -> builder.setValue(a.getName(), a.getValue())); return builder.build(); } @@ -2974,18 +2869,18 @@ public static GetRegionInfoResponse.CompactionState createCompactionState(Compac /** * Creates {@link CompactionState} from - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos - * .RegionLoad.CompactionState} state + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos .RegionLoad.CompactionState} + * state * @param state the protobuf CompactionState * @return CompactionState */ - public static CompactionState createCompactionStateForRegionLoad( - RegionLoad.CompactionState state) { + public static CompactionState + createCompactionStateForRegionLoad(RegionLoad.CompactionState state) { return CompactionState.valueOf(state.toString()); } - public static RegionLoad.CompactionState createCompactionStateForRegionLoad( - CompactionState state) { + public static RegionLoad.CompactionState + createCompactionStateForRegionLoad(CompactionState state) { return RegionLoad.CompactionState.valueOf(state.toString()); } @@ -3034,7 +2929,8 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription */ public static SnapshotProtos.SnapshotDescription createHBaseProtosSnapshotDesc(SnapshotDescription snapshotDesc) { - SnapshotProtos.SnapshotDescription.Builder builder = SnapshotProtos.SnapshotDescription.newBuilder(); + SnapshotProtos.SnapshotDescription.Builder builder = + SnapshotProtos.SnapshotDescription.newBuilder(); if (snapshotDesc.getTableName() != null) { builder.setTable(snapshotDesc.getTableNameAsString()); } @@ -3047,8 +2943,8 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription if (snapshotDesc.getCreationTime() != -1L) { builder.setCreationTime(snapshotDesc.getCreationTime()); } - if (snapshotDesc.getTtl() != -1L && - snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { 
+ if (snapshotDesc.getTtl() != -1L + && snapshotDesc.getTtl() < TimeUnit.MILLISECONDS.toSeconds(Long.MAX_VALUE)) { builder.setTtl(snapshotDesc.getTtl()); } if (snapshotDesc.getVersion() != -1) { @@ -3062,9 +2958,8 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription } /** - * Convert from - * {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} to - * {@link SnapshotDescription} + * Convert from {@link org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription} + * to {@link SnapshotDescription} * @param snapshotDesc the protobuf SnapshotDescription * @return the POJO SnapshotDescription */ @@ -3074,9 +2969,9 @@ public static SnapshotType createSnapshotType(SnapshotProtos.SnapshotDescription snapshotProps.put("TTL", snapshotDesc.getTtl()); snapshotProps.put(TableDescriptorBuilder.MAX_FILESIZE, snapshotDesc.getMaxFileSize()); return new SnapshotDescription(snapshotDesc.getName(), - snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, - createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), - snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); + snapshotDesc.hasTable() ? TableName.valueOf(snapshotDesc.getTable()) : null, + createSnapshotType(snapshotDesc.getType()), snapshotDesc.getOwner(), + snapshotDesc.getCreationTime(), snapshotDesc.getVersion(), snapshotProps); } public static RegionLoadStats createRegionLoadStats(ClientProtos.RegionLoadStats stats) { @@ -3092,7 +2987,7 @@ public static String toText(Message msg) { return TextFormat.shortDebugString(msg); } - public static byte [] toBytes(ByteString bs) { + public static byte[] toBytes(ByteString bs) { return bs.toByteArray(); } @@ -3109,21 +3004,20 @@ public static T call(Callable callable) throws IOException { } /** - * Create a protocol buffer GetStoreFileRequest for a given region name - * - * @param regionName the name of the region to get info - * @param family the family to get store file list - * @return a protocol buffer GetStoreFileRequest - */ - public static GetStoreFileRequest - buildGetStoreFileRequest(final byte[] regionName, final byte[] family) { - GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); - builder.setRegion(region); - builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); - return builder.build(); - } + * Create a protocol buffer GetStoreFileRequest for a given region name + * @param regionName the name of the region to get info + * @param family the family to get store file list + * @return a protocol buffer GetStoreFileRequest + */ + public static GetStoreFileRequest buildGetStoreFileRequest(final byte[] regionName, + final byte[] family) { + GetStoreFileRequest.Builder builder = GetStoreFileRequest.newBuilder(); + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + builder.setRegion(region); + builder.addFamily(UnsafeByteOperations.unsafeWrap(family)); + return builder.build(); + } /** * Create a CloseRegionRequest for a given region name @@ -3143,7 +3037,7 @@ public static CloseRegionRequest buildCloseRegionRequest(ServerName server, byte ServerName destinationServer, long closeProcId) { CloseRegionRequest.Builder builder = CloseRegionRequest.newBuilder(); RegionSpecifier region = - RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); + 
RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (destinationServer != null) { builder.setDestinationServer(toServerName(destinationServer)); @@ -3167,8 +3061,8 @@ public static ProcedureDescription buildProcedureDescription(String signature, S } /** - * Get the Meta region state from the passed data bytes. Can handle both old and new style - * server names. + * Get the Meta region state from the passed data bytes. Can handle both old and new style server + * names. * @param data protobuf serialized data with meta server name. * @param replicaId replica ID for this region * @return RegionState instance corresponding to the serialized data. @@ -3181,15 +3075,13 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic if (data != null && data.length > 0 && ProtobufUtil.isPBMagicPrefix(data)) { try { int prefixLen = ProtobufUtil.lengthOfPBMagic(); - ZooKeeperProtos.MetaRegionServer rl = - ZooKeeperProtos.MetaRegionServer.parser().parseFrom(data, prefixLen, - data.length - prefixLen); + ZooKeeperProtos.MetaRegionServer rl = ZooKeeperProtos.MetaRegionServer.parser() + .parseFrom(data, prefixLen, data.length - prefixLen); if (rl.hasState()) { state = RegionState.State.convert(rl.getState()); } HBaseProtos.ServerName sn = rl.getServer(); - serverName = ServerName.valueOf( - sn.getHostName(), sn.getPort(), sn.getStartCode()); + serverName = ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); } catch (InvalidProtocolBufferException e) { throw new DeserializationException("Unable to parse meta region location"); } @@ -3201,34 +3093,34 @@ public static RegionState parseMetaRegionStateFrom(final byte[] data, int replic state = RegionState.State.OFFLINE; } return new RegionState(RegionReplicaUtil.getRegionInfoForReplica( - RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); + RegionInfoBuilder.FIRST_META_REGIONINFO, replicaId), state, serverName); } /** * Get a ServerName from the passed in data bytes. - * @param data Data with a serialize server name in it; can handle the old style - * servername where servername was host and port. Works too with data that - * begins w/ the pb 'PBUF' magic and that is then followed by a protobuf that - * has a serialized {@link ServerName} in it. - * @return Returns null if data is null else converts passed data - * to a ServerName instance. + * @param data Data with a serialize server name in it; can handle the old style servername where + * servername was host and port. Works too with data that begins w/ the pb 'PBUF' magic + * and that is then followed by a protobuf that has a serialized {@link ServerName} in + * it. + * @return Returns null if data is null else converts passed data to a ServerName + * instance. 
* @throws DeserializationException */ - public static ServerName parseServerNameFrom(final byte [] data) throws DeserializationException { + public static ServerName parseServerNameFrom(final byte[] data) throws DeserializationException { if (data == null || data.length <= 0) return null; if (ProtobufMagic.isPBMagicPrefix(data)) { int prefixLen = ProtobufMagic.lengthOfPBMagic(); try { ZooKeeperProtos.Master rss = - ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); + ZooKeeperProtos.Master.PARSER.parseFrom(data, prefixLen, data.length - prefixLen); org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.ServerName sn = rss.getMaster(); return ServerName.valueOf(sn.getHostName(), sn.getPort(), sn.getStartCode()); - } catch (/*InvalidProtocolBufferException*/IOException e) { + } catch (/* InvalidProtocolBufferException */IOException e) { // A failed parse of the znode is pretty catastrophic. Rather than loop // retrying hoping the bad bytes will changes, and rather than change // the signature on this method to add an IOE which will send ripples all - // over the code base, throw a RuntimeException. This should "never" happen. + // over the code base, throw a RuntimeException. This should "never" happen. // Fail fast if it does. throw new DeserializationException(e); } @@ -3269,7 +3161,8 @@ public static String toLockJson(List lockedRes JsonArray lockedResourceJsons = new JsonArray(lockedResourceProtos.size()); for (LockServiceProtos.LockedResource lockedResourceProto : lockedResourceProtos) { try { - JsonElement lockedResourceJson = ProtobufMessageConverter.toJsonElement(lockedResourceProto); + JsonElement lockedResourceJson = + ProtobufMessageConverter.toJsonElement(lockedResourceProto); lockedResourceJsons.add(lockedResourceJson); } catch (InvalidProtocolBufferException e) { lockedResourceJsons.add(e.toString()); @@ -3280,11 +3173,11 @@ public static String toLockJson(List lockedRes /** * Convert a RegionInfo to a Proto RegionInfo - * * @param info the RegionInfo to convert * @return the converted Proto RegionInfo */ - public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { + public static HBaseProtos.RegionInfo + toRegionInfo(final org.apache.hadoop.hbase.client.RegionInfo info) { if (info == null) { return null; } @@ -3305,18 +3198,18 @@ public static HBaseProtos.RegionInfo toRegionInfo(final org.apache.hadoop.hbase. /** * Convert HBaseProto.RegionInfo to a RegionInfo - * * @param proto the RegionInfo to convert * @return the converted RegionInfo */ - public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBaseProtos.RegionInfo proto) { + public static org.apache.hadoop.hbase.client.RegionInfo + toRegionInfo(final HBaseProtos.RegionInfo proto) { if (proto == null) { return null; } TableName tableName = ProtobufUtil.toTableName(proto.getTableName()); long regionId = proto.getRegionId(); int defaultReplicaId = org.apache.hadoop.hbase.client.RegionInfo.DEFAULT_REPLICA_ID; - int replicaId = proto.hasReplicaId()? proto.getReplicaId(): defaultReplicaId; + int replicaId = proto.hasReplicaId() ? 
proto.getReplicaId() : defaultReplicaId; if (tableName.equals(TableName.META_TABLE_NAME) && replicaId == defaultReplicaId) { return RegionInfoBuilder.FIRST_META_REGIONINFO; } @@ -3332,12 +3225,8 @@ public static org.apache.hadoop.hbase.client.RegionInfo toRegionInfo(final HBase if (proto.hasSplit()) { split = proto.getSplit(); } - RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName) - .setStartKey(startKey) - .setEndKey(endKey) - .setRegionId(regionId) - .setReplicaId(replicaId) - .setSplit(split); + RegionInfoBuilder rib = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) + .setEndKey(endKey).setRegionId(regionId).setReplicaId(replicaId).setSplit(split); if (proto.hasOffline()) { rib.setOffline(proto.getOffline()); } @@ -3360,18 +3249,17 @@ public static HRegionLocation toRegionLocation(HBaseProtos.RegionLocation proto) return new HRegionLocation(regionInfo, serverName, proto.getSeqNum()); } - public static List toSnapshotDescriptionList( - GetCompletedSnapshotsResponse response, Pattern pattern) { + public static List + toSnapshotDescriptionList(GetCompletedSnapshotsResponse response, Pattern pattern) { return response.getSnapshotsList().stream().map(ProtobufUtil::createSnapshotDesc) .filter(snap -> pattern != null ? pattern.matcher(snap.getName()).matches() : true) .collect(Collectors.toList()); } - public static CacheEvictionStats toCacheEvictionStats( - HBaseProtos.CacheEvictionStats stats) throws IOException{ + public static CacheEvictionStats toCacheEvictionStats(HBaseProtos.CacheEvictionStats stats) + throws IOException { CacheEvictionStatsBuilder builder = CacheEvictionStats.builder(); - builder.withEvictedBlocks(stats.getEvictedBlocks()) - .withMaxCacheSize(stats.getMaxCacheSize()); + builder.withEvictedBlocks(stats.getEvictedBlocks()).withMaxCacheSize(stats.getMaxCacheSize()); if (stats.getExceptionCount() > 0) { for (HBaseProtos.RegionExceptionMessage exception : stats.getExceptionList()) { HBaseProtos.RegionSpecifier rs = exception.getRegion(); @@ -3382,60 +3270,47 @@ public static CacheEvictionStats toCacheEvictionStats( return builder.build(); } - public static HBaseProtos.CacheEvictionStats toCacheEvictionStats( - CacheEvictionStats cacheEvictionStats) { - HBaseProtos.CacheEvictionStats.Builder builder - = HBaseProtos.CacheEvictionStats.newBuilder(); + public static HBaseProtos.CacheEvictionStats + toCacheEvictionStats(CacheEvictionStats cacheEvictionStats) { + HBaseProtos.CacheEvictionStats.Builder builder = HBaseProtos.CacheEvictionStats.newBuilder(); for (Map.Entry entry : cacheEvictionStats.getExceptions().entrySet()) { - builder.addException( - RegionExceptionMessage.newBuilder() - .setRegion(RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, entry.getKey())) - .setException(ResponseConverter.buildException(entry.getValue())) - .build() - ); - } - return builder - .setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) - .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()) - .build(); + builder.addException(RegionExceptionMessage.newBuilder() + .setRegion( + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, entry.getKey())) + .setException(ResponseConverter.buildException(entry.getValue())).build()); + } + return builder.setEvictedBlocks(cacheEvictionStats.getEvictedBlocks()) + .setMaxCacheSize(cacheEvictionStats.getMaxCacheSize()).build(); } - public static ClusterStatusProtos.ReplicationLoadSource toReplicationLoadSource( - ReplicationLoadSource rls) { - return 
ClusterStatusProtos.ReplicationLoadSource.newBuilder() - .setPeerID(rls.getPeerID()) + public static ClusterStatusProtos.ReplicationLoadSource + toReplicationLoadSource(ReplicationLoadSource rls) { + return ClusterStatusProtos.ReplicationLoadSource.newBuilder().setPeerID(rls.getPeerID()) .setAgeOfLastShippedOp(rls.getAgeOfLastShippedOp()) .setSizeOfLogQueue((int) rls.getSizeOfLogQueue()) .setTimeStampOfLastShippedOp(rls.getTimestampOfLastShippedOp()) - .setReplicationLag(rls.getReplicationLag()) - .setQueueId(rls.getQueueId()) - .setRecovered(rls.isRecovered()) - .setRunning(rls.isRunning()) + .setReplicationLag(rls.getReplicationLag()).setQueueId(rls.getQueueId()) + .setRecovered(rls.isRecovered()).setRunning(rls.isRunning()) .setEditsSinceRestart(rls.hasEditsSinceRestart()) .setTimeStampOfNextToReplicate(rls.getTimeStampOfNextToReplicate()) - .setOPsShipped(rls.getOPsShipped()) - .setEditsRead(rls.getEditsRead()) - .build(); + .setOPsShipped(rls.getOPsShipped()).setEditsRead(rls.getEditsRead()).build(); } - public static ClusterStatusProtos.ReplicationLoadSink toReplicationLoadSink( - ReplicationLoadSink rls) { + public static ClusterStatusProtos.ReplicationLoadSink + toReplicationLoadSink(ReplicationLoadSink rls) { return ClusterStatusProtos.ReplicationLoadSink.newBuilder() .setAgeOfLastAppliedOp(rls.getAgeOfLastAppliedOp()) .setTimeStampsOfLastAppliedOp(rls.getTimestampsOfLastAppliedOp()) .setTimestampStarted(rls.getTimestampStarted()) - .setTotalOpsProcessed(rls.getTotalOpsProcessed()) - .build(); + .setTotalOpsProcessed(rls.getTotalOpsProcessed()).build(); } public static HBaseProtos.TimeRange toTimeRange(TimeRange timeRange) { if (timeRange == null) { timeRange = TimeRange.allTime(); } - return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()) - .setTo(timeRange.getMax()) - .build(); + return HBaseProtos.TimeRange.newBuilder().setFrom(timeRange.getMin()).setTo(timeRange.getMax()) + .build(); } public static byte[] toCompactionEventTrackerBytes(Set storeFiles) { @@ -3461,8 +3336,8 @@ public static Set toCompactedStoreFiles(byte[] bytes) throws IOException return Collections.emptySet(); } - public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( - RegionStatesCount regionStatesCount) { + public static ClusterStatusProtos.RegionStatesCount + toTableRegionStatesCount(RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3475,17 +3350,13 @@ public static ClusterStatusProtos.RegionStatesCount toTableRegionStatesCount( regionsInTransition = regionStatesCount.getRegionsInTransition(); totalRegions = regionStatesCount.getTotalRegions(); } - return ClusterStatusProtos.RegionStatesCount.newBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return ClusterStatusProtos.RegionStatesCount.newBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } - public static RegionStatesCount toTableRegionStatesCount( - ClusterStatusProtos.RegionStatesCount regionStatesCount) { + public static RegionStatesCount + toTableRegionStatesCount(ClusterStatusProtos.RegionStatesCount regionStatesCount) { int openRegions = 0; int splitRegions = 0; int closedRegions = 0; @@ -3498,64 +3369,52 @@ public static RegionStatesCount toTableRegionStatesCount( 
splitRegions = regionStatesCount.getSplitRegions(); totalRegions = regionStatesCount.getTotalRegions(); } - return new RegionStatesCount.RegionStatesCountBuilder() - .setOpenRegions(openRegions) - .setSplitRegions(splitRegions) - .setClosedRegions(closedRegions) - .setRegionsInTransition(regionsInTransition) - .setTotalRegions(totalRegions) - .build(); + return new RegionStatesCount.RegionStatesCountBuilder().setOpenRegions(openRegions) + .setSplitRegions(splitRegions).setClosedRegions(closedRegions) + .setRegionsInTransition(regionsInTransition).setTotalRegions(totalRegions).build(); } /** * Convert Protobuf class - * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} - * To client SlowLog Payload class {@link OnlineLogRecord} - * + * {@link org.apache.hadoop.hbase.shaded.protobuf.generated.TooSlowLog.SlowLogPayload} To client + * SlowLog Payload class {@link OnlineLogRecord} * @param slowLogPayload SlowLog Payload protobuf instance * @return SlowLog Payload for client usecase */ - private static LogEntry getSlowLogRecord( - final TooSlowLog.SlowLogPayload slowLogPayload) { + private static LogEntry getSlowLogRecord(final TooSlowLog.SlowLogPayload slowLogPayload) { OnlineLogRecord onlineLogRecord = new OnlineLogRecord.OnlineLogRecordBuilder() - .setCallDetails(slowLogPayload.getCallDetails()) - .setClientAddress(slowLogPayload.getClientAddress()) - .setMethodName(slowLogPayload.getMethodName()) - .setMultiGetsCount(slowLogPayload.getMultiGets()) - .setMultiMutationsCount(slowLogPayload.getMultiMutations()) - .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) - .setParam(slowLogPayload.getParam()) - .setProcessingTime(slowLogPayload.getProcessingTime()) - .setQueueTime(slowLogPayload.getQueueTime()) - .setRegionName(slowLogPayload.getRegionName()) - .setResponseSize(slowLogPayload.getResponseSize()) - .setServerClass(slowLogPayload.getServerClass()) - .setStartTime(slowLogPayload.getStartTime()) - .setUserName(slowLogPayload.getUserName()) - .build(); + .setCallDetails(slowLogPayload.getCallDetails()) + .setClientAddress(slowLogPayload.getClientAddress()) + .setMethodName(slowLogPayload.getMethodName()) + .setMultiGetsCount(slowLogPayload.getMultiGets()) + .setMultiMutationsCount(slowLogPayload.getMultiMutations()) + .setMultiServiceCalls(slowLogPayload.getMultiServiceCalls()) + .setParam(slowLogPayload.getParam()).setProcessingTime(slowLogPayload.getProcessingTime()) + .setQueueTime(slowLogPayload.getQueueTime()).setRegionName(slowLogPayload.getRegionName()) + .setResponseSize(slowLogPayload.getResponseSize()) + .setServerClass(slowLogPayload.getServerClass()).setStartTime(slowLogPayload.getStartTime()) + .setUserName(slowLogPayload.getUserName()).build(); return onlineLogRecord; } /** - * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} - * + * Convert AdminProtos#SlowLogResponses to list of {@link OnlineLogRecord} * @param logEntry slowlog response protobuf instance * @return list of SlowLog payloads for client usecase */ - public static List toSlowLogPayloads( - final HBaseProtos.LogEntry logEntry) { + public static List toSlowLogPayloads(final HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("SlowLogResponses")) { - AdminProtos.SlowLogResponses slowLogResponses = (AdminProtos.SlowLogResponses) method - 
.invoke(null, logEntry.getLogMessage()); + AdminProtos.SlowLogResponses slowLogResponses = + (AdminProtos.SlowLogResponses) method.invoke(null, logEntry.getLogMessage()); return slowLogResponses.getSlowLogPayloadsList().stream() - .map(ProtobufUtil::getSlowLogRecord).collect(Collectors.toList()); + .map(ProtobufUtil::getSlowLogRecord).collect(Collectors.toList()); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { throw new RuntimeException("Error while retrieving response from server"); } throw new RuntimeException("Invalid response from server"); @@ -3563,7 +3422,6 @@ public static List toSlowLogPayloads( /** * Convert {@link ClearSlowLogResponses} to boolean - * * @param clearSlowLogResponses Clear slowlog response protobuf instance * @return boolean representing clear slowlog response */ @@ -3572,20 +3430,20 @@ public static boolean toClearSlowLogPayload(final ClearSlowLogResponses clearSlo } public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, - MutationProto mutation, CellScanner cellScanner) throws IOException { + MutationProto mutation, CellScanner cellScanner) throws IOException { byte[] row = condition.getRow().toByteArray(); CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row); Filter filter = condition.hasFilter() ? ProtobufUtil.toFilter(condition.getFilter()) : null; if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = + condition.hasTimeRange() ? ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3608,7 +3466,7 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, - List mutations) throws IOException { + List mutations) throws IOException { assert mutations.size() > 0; byte[] row = condition.getRow().toByteArray(); CheckAndMutate.Builder builder = CheckAndMutate.newBuilder(row); @@ -3616,13 +3474,13 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, if (filter != null) { builder.ifMatches(filter); } else { - builder.ifMatches(condition.getFamily().toByteArray(), - condition.getQualifier().toByteArray(), + builder.ifMatches(condition.getFamily().toByteArray(), condition.getQualifier().toByteArray(), CompareOperator.valueOf(condition.getCompareType().name()), ProtobufUtil.toComparator(condition.getComparator()).getValue()); } - TimeRange timeRange = condition.hasTimeRange() ? - ProtobufUtil.toTimeRange(condition.getTimeRange()) : TimeRange.allTime(); + TimeRange timeRange = + condition.hasTimeRange() ? 
ProtobufUtil.toTimeRange(condition.getTimeRange()) + : TimeRange.allTime(); builder.timeRange(timeRange); try { @@ -3637,8 +3495,8 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } else if (m instanceof Append) { return builder.build((Append) m); } else { - throw new DoNotRetryIOException("Unsupported mutate type: " + m.getClass() - .getSimpleName().toUpperCase()); + throw new DoNotRetryIOException( + "Unsupported mutate type: " + m.getClass().getSimpleName().toUpperCase()); } } else { return builder.build(new RowMutations(mutations.get(0).getRow()).add(mutations)); @@ -3649,169 +3507,150 @@ public static CheckAndMutate toCheckAndMutate(ClientProtos.Condition condition, } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, final Filter filter, + final TimeRange timeRange) throws IOException { - ClientProtos.Condition.Builder builder = ClientProtos.Condition.newBuilder() - .setRow(UnsafeByteOperations.unsafeWrap(row)); + ClientProtos.Condition.Builder builder = + ClientProtos.Condition.newBuilder().setRow(UnsafeByteOperations.unsafeWrap(row)); if (filter != null) { builder.setFilter(ProtobufUtil.toFilter(filter)); } else { builder.setFamily(UnsafeByteOperations.unsafeWrap(family)) - .setQualifier(UnsafeByteOperations.unsafeWrap( - qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)) - .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) - .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); + .setQualifier(UnsafeByteOperations + .unsafeWrap(qualifier == null ? 
HConstants.EMPTY_BYTE_ARRAY : qualifier)) + .setComparator(ProtobufUtil.toComparator(new BinaryComparator(value))) + .setCompareType(HBaseProtos.CompareType.valueOf(op.name())); } return builder.setTimeRange(ProtobufUtil.toTimeRange(timeRange)).build(); } public static ClientProtos.Condition toCondition(final byte[] row, final Filter filter, - final TimeRange timeRange) throws IOException { + final TimeRange timeRange) throws IOException { return toCondition(row, null, null, null, null, filter, timeRange); } public static ClientProtos.Condition toCondition(final byte[] row, final byte[] family, - final byte[] qualifier, final CompareOperator op, final byte[] value, - final TimeRange timeRange) throws IOException { + final byte[] qualifier, final CompareOperator op, final byte[] value, + final TimeRange timeRange) throws IOException { return toCondition(row, family, qualifier, op, value, null, timeRange); } - public static List toBalancerDecisionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerDecisionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerDecisionsResponse")) { MasterProtos.BalancerDecisionsResponse response = - (MasterProtos.BalancerDecisionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerDecisionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerDecisionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { throw new RuntimeException("Error while retrieving response from server"); } throw new RuntimeException("Invalid response from server"); } - public static List toBalancerRejectionResponse( - HBaseProtos.LogEntry logEntry) { + public static List toBalancerRejectionResponse(HBaseProtos.LogEntry logEntry) { try { final String logClassName = logEntry.getLogClassName(); Class logClass = Class.forName(logClassName).asSubclass(Message.class); Method method = logClass.getMethod("parseFrom", ByteString.class); if (logClassName.contains("BalancerRejectionsResponse")) { MasterProtos.BalancerRejectionsResponse response = - (MasterProtos.BalancerRejectionsResponse) method - .invoke(null, logEntry.getLogMessage()); + (MasterProtos.BalancerRejectionsResponse) method.invoke(null, logEntry.getLogMessage()); return getBalancerRejectionEntries(response); } } catch (ClassNotFoundException | NoSuchMethodException | IllegalAccessException - | InvocationTargetException e) { + | InvocationTargetException e) { throw new RuntimeException("Error while retrieving response from server"); } throw new RuntimeException("Invalid response from server"); } - public static List getBalancerDecisionEntries( - MasterProtos.BalancerDecisionsResponse response) { + public static List + getBalancerDecisionEntries(MasterProtos.BalancerDecisionsResponse response) { List balancerDecisions = response.getBalancerDecisionList(); if (CollectionUtils.isEmpty(balancerDecisions)) { return Collections.emptyList(); } - return balancerDecisions.stream().map(balancerDecision -> new BalancerDecision.Builder() - .setInitTotalCost(balancerDecision.getInitTotalCost()) - .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) - .setComputedTotalCost(balancerDecision.getComputedTotalCost()) - 
.setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) - .setComputedSteps(balancerDecision.getComputedSteps()) - .setRegionPlans(balancerDecision.getRegionPlansList()).build()) - .collect(Collectors.toList()); + return balancerDecisions.stream() + .map(balancerDecision -> new BalancerDecision.Builder() + .setInitTotalCost(balancerDecision.getInitTotalCost()) + .setInitialFunctionCosts(balancerDecision.getInitialFunctionCosts()) + .setComputedTotalCost(balancerDecision.getComputedTotalCost()) + .setFinalFunctionCosts(balancerDecision.getFinalFunctionCosts()) + .setComputedSteps(balancerDecision.getComputedSteps()) + .setRegionPlans(balancerDecision.getRegionPlansList()).build()) + .collect(Collectors.toList()); } - public static List getBalancerRejectionEntries( - MasterProtos.BalancerRejectionsResponse response) { + public static List + getBalancerRejectionEntries(MasterProtos.BalancerRejectionsResponse response) { List balancerRejections = response.getBalancerRejectionList(); if (CollectionUtils.isEmpty(balancerRejections)) { return Collections.emptyList(); } - return balancerRejections.stream().map(balancerRejection -> new BalancerRejection.Builder() - .setReason(balancerRejection.getReason()) - .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()) - .build()) - .collect(Collectors.toList()); + return balancerRejections.stream() + .map(balancerRejection -> new BalancerRejection.Builder() + .setReason(balancerRejection.getReason()) + .setCostFuncInfoList(balancerRejection.getCostFuncInfoList()).build()) + .collect(Collectors.toList()); } public static HBaseProtos.LogRequest toBalancerDecisionRequest(int limit) { MasterProtos.BalancerDecisionsRequest balancerDecisionsRequest = - MasterProtos.BalancerDecisionsRequest.newBuilder().setLimit(limit).build(); + MasterProtos.BalancerDecisionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() - .setLogClassName(balancerDecisionsRequest.getClass().getName()) - .setLogMessage(balancerDecisionsRequest.toByteString()) - .build(); + .setLogClassName(balancerDecisionsRequest.getClass().getName()) + .setLogMessage(balancerDecisionsRequest.toByteString()).build(); } public static HBaseProtos.LogRequest toBalancerRejectionRequest(int limit) { MasterProtos.BalancerRejectionsRequest balancerRejectionsRequest = - MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build(); + MasterProtos.BalancerRejectionsRequest.newBuilder().setLimit(limit).build(); return HBaseProtos.LogRequest.newBuilder() - .setLogClassName(balancerRejectionsRequest.getClass().getName()) - .setLogMessage(balancerRejectionsRequest.toByteString()) - .build(); + .setLogClassName(balancerRejectionsRequest.getClass().getName()) + .setLogMessage(balancerRejectionsRequest.toByteString()).build(); } public static MasterProtos.BalanceRequest toBalanceRequest(BalanceRequest request) { - return MasterProtos.BalanceRequest.newBuilder() - .setDryRun(request.isDryRun()) - .setIgnoreRit(request.isIgnoreRegionsInTransition()) - .build(); + return MasterProtos.BalanceRequest.newBuilder().setDryRun(request.isDryRun()) + .setIgnoreRit(request.isIgnoreRegionsInTransition()).build(); } public static BalanceRequest toBalanceRequest(MasterProtos.BalanceRequest request) { - return BalanceRequest.newBuilder() - .setDryRun(request.hasDryRun() && request.getDryRun()) - .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()) - .build(); + return BalanceRequest.newBuilder().setDryRun(request.hasDryRun() && 
request.getDryRun()) + .setIgnoreRegionsInTransition(request.hasIgnoreRit() && request.getIgnoreRit()).build(); } public static MasterProtos.BalanceResponse toBalanceResponse(BalanceResponse response) { - return MasterProtos.BalanceResponse.newBuilder() - .setBalancerRan(response.isBalancerRan()) - .setMovesCalculated(response.getMovesCalculated()) - .setMovesExecuted(response.getMovesExecuted()) - .build(); + return MasterProtos.BalanceResponse.newBuilder().setBalancerRan(response.isBalancerRan()) + .setMovesCalculated(response.getMovesCalculated()) + .setMovesExecuted(response.getMovesExecuted()).build(); } public static BalanceResponse toBalanceResponse(MasterProtos.BalanceResponse response) { return BalanceResponse.newBuilder() - .setBalancerRan(response.hasBalancerRan() && response.getBalancerRan()) - .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesExecuted() : 0) - .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0) - .build(); + .setBalancerRan(response.hasBalancerRan() && response.getBalancerRan()) + .setMovesCalculated(response.hasMovesCalculated() ? response.getMovesExecuted() : 0) + .setMovesExecuted(response.hasMovesExecuted() ? response.getMovesExecuted() : 0).build(); } public static ServerTask getServerTask(ClusterStatusProtos.ServerTask task) { - return ServerTaskBuilder.newBuilder() - .setDescription(task.getDescription()) - .setStatus(task.getStatus()) - .setState(ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + return ServerTaskBuilder.newBuilder().setDescription(task.getDescription()) + .setStatus(task.getStatus()).setState(ServerTask.State.valueOf(task.getState().name())) + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } public static ClusterStatusProtos.ServerTask toServerTask(ServerTask task) { - return ClusterStatusProtos.ServerTask.newBuilder() - .setDescription(task.getDescription()) - .setStatus(task.getStatus()) - .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name())) - .setStartTime(task.getStartTime()) - .setCompletionTime(task.getCompletionTime()) - .build(); + return ClusterStatusProtos.ServerTask.newBuilder().setDescription(task.getDescription()) + .setStatus(task.getStatus()) + .setState(ClusterStatusProtos.ServerTask.State.valueOf(task.getState().name())) + .setStartTime(task.getStartTime()).setCompletionTime(task.getCompletionTime()).build(); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java index d00627a0e572..2e7e41280cd1 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/RequestConverter.java @@ -127,8 +127,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsCleanerChoreEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsMasterRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsNormalizerEnabledRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .IsSnapshotCleanupEnabledRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSnapshotCleanupEnabledRequest; import 
org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.IsSplitOrMergeEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.MergeTableRegionsRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.ModifyColumnRequest; @@ -147,8 +146,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetCleanerChoreRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetNormalizerRunningRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetRegionStateInMetaRequest; -import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos - .SetSnapshotCleanupRequest; +import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSnapshotCleanupRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetSplitOrMergeEnabledRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SetTableStateInMetaRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.SplitTableRegionRequest; @@ -168,8 +166,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos.UpdateReplicationPeerConfigRequest; /** - * Helper utility to build protocol buffer requests, - * or build components for protocol buffer requests. + * Helper utility to build protocol buffer requests, or build components for protocol buffer + * requests. */ @InterfaceAudience.Private public final class RequestConverter { @@ -177,20 +175,18 @@ public final class RequestConverter { private RequestConverter() { } -// Start utilities for Client + // Start utilities for Client /** * Create a protocol buffer GetRequest for a client Get - * * @param regionName the name of the region to get * @param get the client Get * @return a protocol buffer GetRequest */ - public static GetRequest buildGetRequest(final byte[] regionName, - final Get get) throws IOException { + public static GetRequest buildGetRequest(final byte[] regionName, final Get get) + throws IOException { GetRequest.Builder builder = GetRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setGet(ProtobufUtil.toGet(get)); return builder.build(); @@ -198,7 +194,6 @@ public static GetRequest buildGetRequest(final byte[] regionName, /** * Create a protocol buffer MutateRequest for a client increment - * * @param regionName * @param row * @param family @@ -207,12 +202,11 @@ public static GetRequest buildGetRequest(final byte[] regionName, * @param durability * @return a mutate request */ - public static MutateRequest buildIncrementRequest( - final byte[] regionName, final byte[] row, final byte[] family, final byte[] qualifier, - final long amount, final Durability durability, long nonceGroup, long nonce) { + public static MutateRequest buildIncrementRequest(final byte[] regionName, final byte[] row, + final byte[] family, final byte[] qualifier, final long amount, final Durability durability, + long nonceGroup, long nonce) { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); MutationProto.Builder mutateBuilder = MutationProto.newBuilder(); @@ -223,8 +217,8 @@ 
public static MutateRequest buildIncrementRequest( columnBuilder.setFamily(UnsafeByteOperations.unsafeWrap(family)); QualifierValue.Builder valueBuilder = QualifierValue.newBuilder(); valueBuilder.setValue(UnsafeByteOperations.unsafeWrap(Bytes.toBytes(amount))); - valueBuilder.setQualifier(UnsafeByteOperations - .unsafeWrap(qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)); + valueBuilder.setQualifier( + UnsafeByteOperations.unsafeWrap(qualifier == null ? HConstants.EMPTY_BYTE_ARRAY : qualifier)); valueBuilder.setTimestamp(HConstants.LATEST_TIMESTAMP); columnBuilder.addQualifierValue(valueBuilder.build()); mutateBuilder.addColumnValue(columnBuilder.build()); @@ -240,61 +234,60 @@ public static MutateRequest buildIncrementRequest( /** * Create a protocol buffer MutateRequest for a conditioned put/delete/increment/append - * * @return a mutate request * @throws IOException */ public static MutateRequest buildMutateRequest(final byte[] regionName, final byte[] row, - final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value, - final Filter filter, final TimeRange timeRange, final Mutation mutation, long nonceGroup, - long nonce) throws IOException { + final byte[] family, final byte[] qualifier, final CompareOperator op, final byte[] value, + final Filter filter, final TimeRange timeRange, final Mutation mutation, long nonceGroup, + long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); if (mutation instanceof Increment || mutation instanceof Append) { builder.setMutation(ProtobufUtil.toMutation(getMutationType(mutation), mutation, nonce)) - .setNonceGroup(nonceGroup); + .setNonceGroup(nonceGroup); } else { builder.setMutation(ProtobufUtil.toMutation(getMutationType(mutation), mutation)); } return builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)) - .setCondition(ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange)) - .build(); + .setCondition( + ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange)) + .build(); } /** * Create a protocol buffer MultiRequest for conditioned row mutations - * * @return a multi request * @throws IOException */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final byte[] row, final byte[] family, final byte[] qualifier, - final CompareOperator op, final byte[] value, final Filter filter, final TimeRange timeRange, - final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { - return buildMultiRequest(regionName, rowMutations, ProtobufUtil.toCondition(row, family, - qualifier, op, value, filter, timeRange), nonceGroup, nonce); + final byte[] row, final byte[] family, final byte[] qualifier, final CompareOperator op, + final byte[] value, final Filter filter, final TimeRange timeRange, + final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { + return buildMultiRequest(regionName, rowMutations, + ProtobufUtil.toCondition(row, family, qualifier, op, value, filter, timeRange), nonceGroup, + nonce); } /** * Create a protocol buffer MultiRequest for row mutations - * * @return a multi request */ public static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { + final RowMutations rowMutations, long nonceGroup, long nonce) throws IOException { return buildMultiRequest(regionName, rowMutations, null, nonceGroup, nonce); 
} private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, - final RowMutations rowMutations, final Condition condition, long nonceGroup, long nonce) - throws IOException { + final RowMutations rowMutations, final Condition condition, long nonceGroup, long nonce) + throws IOException { RegionAction.Builder builder = - getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName); + getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName); builder.setAtomic(true); boolean hasNonce = false; ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder(); MutationProto.Builder mutationBuilder = MutationProto.newBuilder(); - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -322,17 +315,15 @@ private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionNa /** * Create a protocol buffer MutateRequest for a put - * * @param regionName * @param put * @return a mutate request * @throws IOException */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Put put) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Put put) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); builder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, put, MutationProto.newBuilder())); return builder.build(); @@ -340,68 +331,62 @@ public static MutateRequest buildMutateRequest( /** * Create a protocol buffer MutateRequest for an append - * * @param regionName * @param append * @return a mutate request * @throws IOException */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Append append, long nonceGroup, long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Append append, + long nonceGroup, long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } - builder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, append, - MutationProto.newBuilder(), nonce)); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.APPEND, append, MutationProto.newBuilder(), nonce)); return builder.build(); } /** * Create a protocol buffer MutateRequest for a client increment - * * @param regionName * @param increment * @return a mutate request */ - public static MutateRequest buildMutateRequest(final byte[] regionName, - final Increment increment, final long nonceGroup, final long nonce) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Increment increment, + final long nonceGroup, final long nonce) throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - 
RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (nonce != HConstants.NO_NONCE && nonceGroup != HConstants.NO_NONCE) { builder.setNonceGroup(nonceGroup); } builder.setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, increment, - MutationProto.newBuilder(), nonce)); + MutationProto.newBuilder(), nonce)); return builder.build(); } /** * Create a protocol buffer MutateRequest for a delete - * * @param regionName * @param delete * @return a mutate request * @throws IOException */ - public static MutateRequest buildMutateRequest( - final byte[] regionName, final Delete delete) throws IOException { + public static MutateRequest buildMutateRequest(final byte[] regionName, final Delete delete) + throws IOException { MutateRequest.Builder builder = MutateRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); - builder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, - MutationProto.newBuilder())); + builder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, delete, MutationProto.newBuilder())); return builder.build(); } public static RegionAction.Builder getRegionActionBuilderWithRegion( - final RegionAction.Builder regionActionBuilder, final byte [] regionName) { + final RegionAction.Builder regionActionBuilder, final byte[] regionName) { RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); regionActionBuilder.setRegion(region); return regionActionBuilder; @@ -409,7 +394,6 @@ public static RegionAction.Builder getRegionActionBuilderWithRegion( /** * Create a protocol buffer ScanRequest for a client Scan - * * @param regionName * @param scan * @param numberOfRows @@ -480,7 +464,6 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo /** * Create a protocol buffer bulk load request - * * @param familyPaths * @param regionName * @param assignSeqNum @@ -489,16 +472,14 @@ public static ScanRequest buildScanRequest(long scannerId, int numberOfRows, boo * @return a bulk load request */ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( - final List> familyPaths, - final byte[] regionName, boolean assignSeqNum, + final List> familyPaths, final byte[] regionName, boolean assignSeqNum, final Token userToken, final String bulkToken) { return buildBulkLoadHFileRequest(familyPaths, regionName, assignSeqNum, userToken, bulkToken, - false, null, true); + false, null, true); } /** * Create a protocol buffer bulk load request - * * @param familyPaths * @param regionName * @param assignSeqNum @@ -509,38 +490,34 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( */ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( final List> familyPaths, final byte[] regionName, boolean assignSeqNum, - final Token userToken, final String bulkToken, boolean copyFiles, - List clusterIds, boolean replicate) { - RegionSpecifier region = RequestConverter.buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + final Token userToken, final String bulkToken, boolean copyFiles, List clusterIds, + boolean replicate) { + RegionSpecifier region = + RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); ClientProtos.DelegationToken protoDT = null; if (userToken != 
null) { - protoDT = - ClientProtos.DelegationToken.newBuilder() - .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) - .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + protoDT = ClientProtos.DelegationToken.newBuilder() + .setIdentifier(UnsafeByteOperations.unsafeWrap(userToken.getIdentifier())) + .setPassword(UnsafeByteOperations.unsafeWrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } - List protoFamilyPaths = new ArrayList<>(familyPaths.size()); + List protoFamilyPaths = + new ArrayList<>(familyPaths.size()); if (!familyPaths.isEmpty()) { - ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder - = ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); - for(Pair el: familyPaths) { - protoFamilyPaths.add(pathBuilder - .setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) - .setPath(el.getSecond()).build()); + ClientProtos.BulkLoadHFileRequest.FamilyPath.Builder pathBuilder = + ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder(); + for (Pair el : familyPaths) { + protoFamilyPaths.add(pathBuilder.setFamily(UnsafeByteOperations.unsafeWrap(el.getFirst())) + .setPath(el.getSecond()).build()); } pathBuilder.clear(); } - BulkLoadHFileRequest.Builder request = - ClientProtos.BulkLoadHFileRequest.newBuilder() - .setRegion(region) - .setAssignSeqNum(assignSeqNum) - .addAllFamilyPath(protoFamilyPaths); + BulkLoadHFileRequest.Builder request = ClientProtos.BulkLoadHFileRequest.newBuilder() + .setRegion(region).setAssignSeqNum(assignSeqNum).addAllFamilyPath(protoFamilyPaths); if (userToken != null) { request.setFsToken(protoDT); } @@ -566,61 +543,59 @@ public static BulkLoadHFileRequest buildBulkLoadHFileRequest( * @param mutationBuilder mutationBuilder to be used to build mutation. * @param nonceGroup nonceGroup to be applied. 
* @param indexMap Map of created RegionAction to the original index for a - * RowMutations/CheckAndMutate within the original list of actions + * RowMutations/CheckAndMutate within the original list of actions * @throws IOException */ - public static void buildRegionActions(final byte[] regionName, - final List actions, final MultiRequest.Builder multiRequestBuilder, + public static void buildRegionActions(final byte[] regionName, final List actions, + final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, long nonceGroup, final Map indexMap) throws IOException { regionActionBuilder.clear(); - RegionAction.Builder builder = getRegionActionBuilderWithRegion( - regionActionBuilder, regionName); + RegionAction.Builder builder = + getRegionActionBuilderWithRegion(regionActionBuilder, regionName); ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null; boolean hasNonce = false; List rowMutationsList = new ArrayList<>(); List checkAndMutates = new ArrayList<>(); - for (Action action: actions) { + for (Action action : actions) { Row row = action.getAction(); actionBuilder.clear(); actionBuilder.setIndex(action.getOriginalIndex()); mutationBuilder.clear(); if (row instanceof Get) { - Get g = (Get)row; + Get g = (Get) row; builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g))); } else if (row instanceof Put) { - builder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.PUT, (Put)row, mutationBuilder))); + builder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder))); } else if (row instanceof Delete) { - builder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.DELETE, (Delete)row, mutationBuilder))); + builder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder))); } else if (row instanceof Append) { - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation( - MutationType.APPEND, (Append)row, mutationBuilder, action.getNonce()))); + builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, + (Append) row, mutationBuilder, action.getNonce()))); hasNonce = true; } else if (row instanceof Increment) { - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation( - MutationType.INCREMENT, (Increment)row, mutationBuilder, action.getNonce()))); + builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, + (Increment) row, mutationBuilder, action.getNonce()))); hasNonce = true; } else if (row instanceof RegionCoprocessorServiceExec) { RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row; // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString. 
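Reviewer note on the "DUMB COPY!!! FIX!!!" comment in the hunk above: the copy exists because the coprocessor request is built with the unshaded com.google.protobuf runtime while these RPC protos use HBase's shaded runtime, so the request is serialized once and re-wrapped. A small sketch of that bridge, with the class and method names invented here for illustration:

// Sketch of the cross-runtime copy performed in buildRegionActions above; the unshaded
// request bytes are wrapped (without a second copy) into the shaded ByteString type.
import com.google.protobuf.Message; // unshaded runtime used by coprocessor endpoints
import org.apache.hbase.thirdparty.com.google.protobuf.ByteString; // shaded runtime
import org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations;

final class ByteStringBridgeSketch {
  static ByteString toShadedByteString(Message unshadedRequest) {
    // toByteArray() serializes the unshaded message; unsafeWrap exposes those bytes to the
    // shaded runtime without copying them again.
    return UnsafeByteOperations.unsafeWrap(unshadedRequest.toByteArray());
  }
}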
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value = - org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap( - exec.getRequest().toByteArray()); + org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(exec.getRequest().toByteArray()); if (cpBuilder == null) { cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder(); } else { cpBuilder.clear(); } - builder.addAction(actionBuilder.setServiceCall( - cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) - .setServiceName(exec.getMethod().getService().getFullName()) - .setMethodName(exec.getMethod().getName()) - .setRequest(value))); + builder.addAction(actionBuilder + .setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) + .setServiceName(exec.getMethod().getService().getFullName()) + .setMethodName(exec.getMethod().getName()).setRequest(value))); } else if (row instanceof RowMutations) { rowMutationsList.add(action); } else if (row instanceof CheckAndMutate) { @@ -645,16 +620,14 @@ public static void buildRegionActions(final byte[] regionName, builder.clear(); getRegionActionBuilderWithRegion(builder, regionName); - buildRegionAction((RowMutations) action.getAction(), builder, actionBuilder, - mutationBuilder); + buildRegionAction((RowMutations) action.getAction(), builder, actionBuilder, mutationBuilder); builder.setAtomic(true); multiRequestBuilder.addRegionAction(builder.build()); // This rowMutations region action is at (multiRequestBuilder.getRegionActionCount() - 1) // in the overall multiRequest. - indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, - action.getOriginalIndex()); + indexMap.put(multiRequestBuilder.getRegionActionCount() - 1, action.getOriginalIndex()); } // Process CheckAndMutate here. 
Similar to RowMutations, we do separate RegionAction for each @@ -665,27 +638,26 @@ public static void buildRegionActions(final byte[] regionName, getRegionActionBuilderWithRegion(builder, regionName); CheckAndMutate cam = (CheckAndMutate) action.getAction(); - builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), - cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(), - cam.getTimeRange())); + builder + .setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(), + cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange())); if (cam.getAction() instanceof Put) { actionBuilder.clear(); mutationBuilder.clear(); - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, - (Put) cam.getAction(), mutationBuilder))); + builder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutation(MutationType.PUT, (Put) cam.getAction(), mutationBuilder))); } else if (cam.getAction() instanceof Delete) { actionBuilder.clear(); mutationBuilder.clear(); - builder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, - (Delete) cam.getAction(), mutationBuilder))); + builder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutation(MutationType.DELETE, (Delete) cam.getAction(), mutationBuilder))); } else if (cam.getAction() instanceof RowMutations) { - buildRegionAction((RowMutations) cam.getAction(), builder, actionBuilder, - mutationBuilder); + buildRegionAction((RowMutations) cam.getAction(), builder, actionBuilder, mutationBuilder); builder.setAtomic(true); } else { - throw new DoNotRetryIOException("CheckAndMutate doesn't support " + - cam.getAction().getClass().getName()); + throw new DoNotRetryIOException( + "CheckAndMutate doesn't support " + cam.getAction().getClass().getName()); } multiRequestBuilder.addRegionAction(builder.build()); @@ -697,18 +669,18 @@ public static void buildRegionActions(final byte[] regionName, } private static void buildRegionAction(final RowMutations rowMutations, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { - for (Mutation mutation: rowMutations.getMutations()) { + final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { + for (Mutation mutation : rowMutations.getMutations()) { MutationType mutateType; if (mutation instanceof Put) { mutateType = MutationType.PUT; } else if (mutation instanceof Delete) { mutateType = MutationType.DELETE; } else { - throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + - mutation.getClass().getName()); + throw new DoNotRetryIOException( + "RowMutations supports only put and delete, not " + mutation.getClass().getName()); } mutationBuilder.clear(); MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder); @@ -719,13 +691,15 @@ private static void buildRegionAction(final RowMutations rowMutations, /** * Create a protocol buffer multirequest with NO data for a list of actions (data is carried - * otherwise than via protobuf). This means it just notes attributes, whether to write the - * WAL, etc., and the presence in protobuf serves as place holder for the data which is - * coming along otherwise. Note that Get is different. It does not contain 'data' and is always - * carried by protobuf. 
We return references to the data by adding them to the passed in - * data param. - * Propagates Actions original index. - *
    The passed in multiRequestBuilder will be populated with region actions. + * otherwise than via protobuf). This means it just notes attributes, whether to write the WAL, + * etc., and the presence in protobuf serves as place holder for the data which is coming along + * otherwise. Note that Get is different. It does not contain 'data' and is always carried by + * protobuf. We return references to the data by adding them to the passed in data + * param. + *

    + * Propagates Actions original index. + *

    + * The passed in multiRequestBuilder will be populated with region actions. * @param regionName The region name of the actions. * @param actions The actions that are grouped by the same region name. * @param cells Place to stuff references to actual data. @@ -735,31 +709,30 @@ private static void buildRegionAction(final RowMutations rowMutations, * @param mutationBuilder mutationBuilder to be used to build mutation. * @param nonceGroup nonceGroup to be applied. * @param indexMap Map of created RegionAction to the original index for a - * RowMutations/CheckAndMutate within the original list of actions + * RowMutations/CheckAndMutate within the original list of actions * @throws IOException */ public static void buildNoDataRegionActions(final byte[] regionName, final Iterable actions, final List cells, final MultiRequest.Builder multiRequestBuilder, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder, long nonceGroup, final Map indexMap) throws IOException { regionActionBuilder.clear(); - RegionAction.Builder builder = getRegionActionBuilderWithRegion( - regionActionBuilder, regionName); + RegionAction.Builder builder = + getRegionActionBuilderWithRegion(regionActionBuilder, regionName); ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null; boolean hasNonce = false; List rowMutationsList = new ArrayList<>(); List checkAndMutates = new ArrayList<>(); - for (Action action: actions) { + for (Action action : actions) { Row row = action.getAction(); actionBuilder.clear(); actionBuilder.setIndex(action.getOriginalIndex()); mutationBuilder.clear(); if (row instanceof Get) { - Get g = (Get)row; + Get g = (Get) row; builder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g))); } else if (row instanceof Put) { buildNoDataRegionAction((Put) row, cells, builder, actionBuilder, mutationBuilder); @@ -777,18 +750,17 @@ public static void buildNoDataRegionActions(final byte[] regionName, RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row; // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString. 
org.apache.hbase.thirdparty.com.google.protobuf.ByteString value = - org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.unsafeWrap( - exec.getRequest().toByteArray()); + org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations + .unsafeWrap(exec.getRequest().toByteArray()); if (cpBuilder == null) { cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder(); } else { cpBuilder.clear(); } - builder.addAction(actionBuilder.setServiceCall( - cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) - .setServiceName(exec.getMethod().getService().getFullName()) - .setMethodName(exec.getMethod().getName()) - .setRequest(value))); + builder.addAction(actionBuilder + .setServiceCall(cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow())) + .setServiceName(exec.getMethod().getService().getFullName()) + .setMethodName(exec.getMethod().getName()).setRequest(value))); } else if (row instanceof RowMutations) { rowMutationsList.add(action); } else if (row instanceof CheckAndMutate) { @@ -832,9 +804,9 @@ public static void buildNoDataRegionActions(final byte[] regionName, getRegionActionBuilderWithRegion(builder, regionName); CheckAndMutate cam = (CheckAndMutate) action.getAction(); - builder.setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), - cam.getQualifier(), cam.getCompareOp(), cam.getValue(), cam.getFilter(), - cam.getTimeRange())); + builder + .setCondition(ProtobufUtil.toCondition(cam.getRow(), cam.getFamily(), cam.getQualifier(), + cam.getCompareOp(), cam.getValue(), cam.getFilter(), cam.getTimeRange())); if (cam.getAction() instanceof Put) { actionBuilder.clear(); @@ -866,8 +838,8 @@ public static void buildNoDataRegionActions(final byte[] regionName, } builder.setAtomic(true); } else { - throw new DoNotRetryIOException("CheckAndMutate doesn't support " + - cam.getAction().getClass().getName()); + throw new DoNotRetryIOException( + "CheckAndMutate doesn't support " + cam.getAction().getClass().getName()); } multiRequestBuilder.addRegionAction(builder.build()); @@ -883,61 +855,61 @@ public static void buildNoDataRegionActions(final byte[] regionName, } private static void buildNoDataRegionAction(final Put put, final List cells, - final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(put); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutationNoData(MutationType.PUT, put, mutationBuilder))); } - private static void buildNoDataRegionAction(final Delete delete, - final List cells, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + private static void buildNoDataRegionAction(final Delete delete, final List cells, + final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { int size = delete.size(); // Note that a legitimate Delete may have a size of zero; i.e. a Delete that has nothing - // in it but the row to delete. 
In this case, the current implementation does not make + // in it but the row to delete. In this case, the current implementation does not make // a KeyValue to represent a delete-of-all-the-row until we serialize... For such cases // where the size returned is zero, we will send the Delete fully pb'd rather than have // metadata only in the pb and then send the kv along the side in cells. if (size > 0) { cells.add(delete); - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.DELETE, delete, mutationBuilder))); } else { - regionActionBuilder.addAction(actionBuilder. - setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); + regionActionBuilder.addAction(actionBuilder + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete, mutationBuilder))); } } private static void buildNoDataRegionAction(final Increment increment, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + final List cells, long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(increment); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.INCREMENT, increment, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.INCREMENT, increment, mutationBuilder, nonce))); } - private static void buildNoDataRegionAction(final Append append, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, - final MutationProto.Builder mutationBuilder) throws IOException { + private static void buildNoDataRegionAction(final Append append, final List cells, + long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { cells.add(append); - regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutationNoData( - MutationType.APPEND, append, mutationBuilder, nonce))); + regionActionBuilder.addAction(actionBuilder.setMutation( + ProtobufUtil.toMutationNoData(MutationType.APPEND, append, mutationBuilder, nonce))); } /** * @return whether or not the rowMutations has a Increment or Append */ private static boolean buildNoDataRegionAction(final RowMutations rowMutations, - final List cells, long nonce, final RegionAction.Builder regionActionBuilder, - final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) - throws IOException { + final List cells, long nonce, final RegionAction.Builder regionActionBuilder, + final ClientProtos.Action.Builder actionBuilder, final MutationProto.Builder mutationBuilder) + throws IOException { boolean ret = false; - for (Mutation mutation: rowMutations.getMutations()) { + for (Mutation mutation : rowMutations.getMutations()) { mutationBuilder.clear(); MutationProto mp; if (mutation instanceof Increment || mutation instanceof Append) { @@ -966,45 +938,39 @@ private static MutationType getMutationType(Mutation mutation) { } } -// End utilities for Client 
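A minimal sketch of the "no data" split these client-side builders implement (mutation metadata stays in protobuf, cell data rides alongside in a cell block, and a zero-size Delete is sent fully serialized as the comment above notes). The types and names below are hypothetical stand-ins, not the HBase API:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class NoDataSketch {
  /** Hypothetical metadata-only record standing in for the protobuf MutationProto. */
  static class MutationMeta {
    final String row;
    final int cellCount;
    MutationMeta(String row, int cellCount) { this.row = row; this.cellCount = cellCount; }
    @Override public String toString() { return row + " (" + cellCount + " cells out-of-band)"; }
  }

  /** Mirrors the decision made by the buildNoDataRegionAction overloads above. */
  static MutationMeta addAction(String row, List<String> cells, List<List<String>> cellBlock) {
    if (cells.isEmpty()) {
      // Zero-size Delete case: nothing to carry alongside, so it would go fully in protobuf.
      return new MutationMeta(row, 0);
    }
    cellBlock.add(cells); // the data travels in the side cell block, not in protobuf
    return new MutationMeta(row, cells.size()); // protobuf keeps metadata only
  }

  public static void main(String[] args) {
    List<List<String>> cellBlock = new ArrayList<>();
    System.out.println(addAction("row1", Arrays.asList("cf:q=v"), cellBlock));
    System.out.println(addAction("row2", new ArrayList<String>(), cellBlock));
    System.out.println("cell block entries: " + cellBlock.size());
  }
}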
-//Start utilities for Admin + // End utilities for Client + // Start utilities for Admin /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * * @param regionName the name of the region to get info * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName) { return buildGetRegionInfoRequest(regionName, false); } /** * Create a protocol buffer GetRegionInfoRequest for a given region name - * * @param regionName the name of the region to get info * @param includeCompactionState indicate if the compaction state is requested * @return a protocol buffer GetRegionInfoRequest */ - public static GetRegionInfoRequest - buildGetRegionInfoRequest(final byte[] regionName, - final boolean includeCompactionState) { + public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, + final boolean includeCompactionState) { return buildGetRegionInfoRequest(regionName, includeCompactionState, false); } /** - * - * @param regionName the name of the region to get info - * @param includeCompactionState indicate if the compaction state is requested - * @param includeBestSplitRow indicate if the bestSplitRow is requested + * @param regionName the name of the region to get info + * @param includeCompactionState indicate if the compaction state is requested + * @param includeBestSplitRow indicate if the bestSplitRow is requested * @return protocol buffer GetRegionInfoRequest */ public static GetRegionInfoRequest buildGetRegionInfoRequest(final byte[] regionName, final boolean includeCompactionState, boolean includeBestSplitRow) { GetRegionInfoRequest.Builder builder = GetRegionInfoRequest.newBuilder(); - RegionSpecifier region = buildRegionSpecifier( - RegionSpecifierType.REGION_NAME, regionName); + RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); if (includeCompactionState) { builder.setCompactionState(includeCompactionState); @@ -1052,7 +1018,7 @@ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName * @return a protocol buffer FlushRegionRequest */ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName, - byte[] columnFamily, boolean writeFlushWALMarker) { + byte[] columnFamily, boolean writeFlushWALMarker) { FlushRegionRequest.Builder builder = FlushRegionRequest.newBuilder(); RegionSpecifier region = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName); builder.setRegion(region); @@ -1070,8 +1036,8 @@ public static FlushRegionRequest buildFlushRegionRequest(final byte[] regionName * @param favoredNodes a list of favored nodes * @return a protocol buffer OpenRegionRequest */ - public static OpenRegionRequest buildOpenRegionRequest(ServerName server, - final RegionInfo region, List favoredNodes) { + public static OpenRegionRequest buildOpenRegionRequest(ServerName server, final RegionInfo region, + List favoredNodes) { OpenRegionRequest.Builder builder = OpenRegionRequest.newBuilder(); builder.addOpenInfo(buildRegionOpenInfo(region, favoredNodes, -1L)); if (server != null) { @@ -1135,8 +1101,8 @@ public static CompactRegionRequest buildCompactRegionRequest(byte[] regionName, /** * @see #buildRollWALWriterRequest() */ - private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = RollWALWriterRequest.newBuilder() - .build(); + private static 
RollWALWriterRequest ROLL_WAL_WRITER_REQUEST = + RollWALWriterRequest.newBuilder().build(); /** * Create a new RollWALWriterRequest @@ -1149,8 +1115,8 @@ public static RollWALWriterRequest buildRollWALWriterRequest() { /** * @see #buildGetServerInfoRequest() */ - private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = GetServerInfoRequest.newBuilder() - .build(); + private static GetServerInfoRequest GET_SERVER_INFO_REQUEST = + GetServerInfoRequest.newBuilder().build(); /** * Create a new GetServerInfoRequest @@ -1171,17 +1137,16 @@ public static StopServerRequest buildStopServerRequest(final String reason) { return builder.build(); } -//End utilities for Admin + // End utilities for Admin /** * Convert a byte array to a protocol buffer RegionSpecifier - * * @param type the region specifier type * @param value the region specifier byte array value * @return a protocol buffer RegionSpecifier */ - public static RegionSpecifier buildRegionSpecifier( - final RegionSpecifierType type, final byte[] value) { + public static RegionSpecifier buildRegionSpecifier(final RegionSpecifierType type, + final byte[] value) { RegionSpecifier.Builder regionBuilder = RegionSpecifier.newBuilder(); regionBuilder.setValue(UnsafeByteOperations.unsafeWrap(value)); regionBuilder.setType(type); @@ -1190,16 +1155,12 @@ public static RegionSpecifier buildRegionSpecifier( /** * Create a protocol buffer AddColumnRequest - * * @param tableName * @param column * @return an AddColumnRequest */ - public static AddColumnRequest buildAddColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static AddColumnRequest buildAddColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { AddColumnRequest.Builder builder = AddColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -1210,16 +1171,12 @@ public static AddColumnRequest buildAddColumnRequest( /** * Create a protocol buffer DeleteColumnRequest - * * @param tableName * @param columnName * @return a DeleteColumnRequest */ - public static DeleteColumnRequest buildDeleteColumnRequest( - final TableName tableName, - final byte [] columnName, - final long nonceGroup, - final long nonce) { + public static DeleteColumnRequest buildDeleteColumnRequest(final TableName tableName, + final byte[] columnName, final long nonceGroup, final long nonce) { DeleteColumnRequest.Builder builder = DeleteColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setColumnName(UnsafeByteOperations.unsafeWrap(columnName)); @@ -1230,16 +1187,12 @@ public static DeleteColumnRequest buildDeleteColumnRequest( /** * Create a protocol buffer ModifyColumnRequest - * * @param tableName * @param column * @return an ModifyColumnRequest */ - public static ModifyColumnRequest buildModifyColumnRequest( - final TableName tableName, - final ColumnFamilyDescriptor column, - final long nonceGroup, - final long nonce) { + public static ModifyColumnRequest buildModifyColumnRequest(final TableName tableName, + final ColumnFamilyDescriptor column, final long nonceGroup, final long nonce) { ModifyColumnRequest.Builder builder = ModifyColumnRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setColumnFamilies(ProtobufUtil.toColumnFamilySchema(column)); @@ -1248,11 +1201,11 @@ 
public static ModifyColumnRequest buildModifyColumnRequest( return builder.build(); } - public static ModifyColumnStoreFileTrackerRequest - buildModifyColumnStoreFileTrackerRequest(final TableName tableName, final byte[] family, - final String dstSFT, final long nonceGroup, final long nonce) { + public static ModifyColumnStoreFileTrackerRequest buildModifyColumnStoreFileTrackerRequest( + final TableName tableName, final byte[] family, final String dstSFT, final long nonceGroup, + final long nonce) { ModifyColumnStoreFileTrackerRequest.Builder builder = - ModifyColumnStoreFileTrackerRequest.newBuilder(); + ModifyColumnStoreFileTrackerRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setFamily(ByteString.copyFrom(family)); builder.setDstSft(dstSFT); @@ -1270,8 +1223,8 @@ public static ModifyColumnRequest buildModifyColumnRequest( public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, ServerName destServerName) { MoveRegionRequest.Builder builder = MoveRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, - encodedRegionName)); + builder.setRegion( + buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, encodedRegionName)); if (destServerName != null) { builder.setDestServerName(ProtobufUtil.toServerName(destServerName)); } @@ -1279,14 +1232,12 @@ public static MoveRegionRequest buildMoveRegionRequest(byte[] encodedRegionName, } public static MergeTableRegionsRequest buildMergeTableRegionsRequest( - final byte[][] encodedNameOfdaughaterRegions, - final boolean forcible, - final long nonceGroup, + final byte[][] encodedNameOfdaughaterRegions, final boolean forcible, final long nonceGroup, final long nonce) throws DeserializationException { MergeTableRegionsRequest.Builder builder = MergeTableRegionsRequest.newBuilder(); - for (int i = 0; i< encodedNameOfdaughaterRegions.length; i++) { - builder.addRegion(buildRegionSpecifier( - RegionSpecifierType.ENCODED_REGION_NAME, encodedNameOfdaughaterRegions[i])); + for (int i = 0; i < encodedNameOfdaughaterRegions.length; i++) { + builder.addRegion(buildRegionSpecifier(RegionSpecifierType.ENCODED_REGION_NAME, + encodedNameOfdaughaterRegions[i])); } builder.setForcible(forcible); builder.setNonceGroup(nonceGroup); @@ -1309,51 +1260,44 @@ public static SplitTableRegionRequest buildSplitTableRegionRequest(final RegionI /** * Create a protocol buffer AssignRegionRequest - * * @param regionName * @return an AssignRegionRequest */ - public static AssignRegionRequest buildAssignRegionRequest(final byte [] regionName) { + public static AssignRegionRequest buildAssignRegionRequest(final byte[] regionName) { AssignRegionRequest.Builder builder = AssignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** * Creates a protocol buffer UnassignRegionRequest - * * @param regionName * @return an UnassignRegionRequest */ - public static UnassignRegionRequest buildUnassignRegionRequest( - final byte [] regionName) { + public static UnassignRegionRequest buildUnassignRegionRequest(final byte[] regionName) { UnassignRegionRequest.Builder builder = UnassignRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return 
builder.build(); } /** * Creates a protocol buffer OfflineRegionRequest - * * @param regionName * @return an OfflineRegionRequest */ - public static OfflineRegionRequest buildOfflineRegionRequest(final byte [] regionName) { + public static OfflineRegionRequest buildOfflineRegionRequest(final byte[] regionName) { OfflineRegionRequest.Builder builder = OfflineRegionRequest.newBuilder(); - builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME,regionName)); + builder.setRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName)); return builder.build(); } /** * Creates a protocol buffer DeleteTableRequest - * * @param tableName * @return a DeleteTableRequest */ - public static DeleteTableRequest buildDeleteTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DeleteTableRequest buildDeleteTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DeleteTableRequest.Builder builder = DeleteTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1363,16 +1307,12 @@ public static DeleteTableRequest buildDeleteTableRequest( /** * Creates a protocol buffer TruncateTableRequest - * * @param tableName name of table to truncate * @param preserveSplits True if the splits should be preserved * @return a TruncateTableRequest */ - public static TruncateTableRequest buildTruncateTableRequest( - final TableName tableName, - final boolean preserveSplits, - final long nonceGroup, - final long nonce) { + public static TruncateTableRequest buildTruncateTableRequest(final TableName tableName, + final boolean preserveSplits, final long nonceGroup, final long nonce) { TruncateTableRequest.Builder builder = TruncateTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setPreserveSplits(preserveSplits); @@ -1383,14 +1323,11 @@ public static TruncateTableRequest buildTruncateTableRequest( /** * Creates a protocol buffer EnableTableRequest - * * @param tableName * @return an EnableTableRequest */ - public static EnableTableRequest buildEnableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static EnableTableRequest buildEnableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { EnableTableRequest.Builder builder = EnableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName(tableName)); builder.setNonceGroup(nonceGroup); @@ -1400,14 +1337,11 @@ public static EnableTableRequest buildEnableTableRequest( /** * Creates a protocol buffer DisableTableRequest - * * @param tableName * @return a DisableTableRequest */ - public static DisableTableRequest buildDisableTableRequest( - final TableName tableName, - final long nonceGroup, - final long nonce) { + public static DisableTableRequest buildDisableTableRequest(final TableName tableName, + final long nonceGroup, final long nonce) { DisableTableRequest.Builder builder = DisableTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setNonceGroup(nonceGroup); @@ -1417,20 +1351,16 @@ public static DisableTableRequest buildDisableTableRequest( /** * Creates a protocol buffer CreateTableRequest - * * @param tableDescriptor * @param splitKeys * @return a CreateTableRequest */ - public static CreateTableRequest buildCreateTableRequest( - final TableDescriptor tableDescriptor, - final byte [][] splitKeys, - 
final long nonceGroup, - final long nonce) { + public static CreateTableRequest buildCreateTableRequest(final TableDescriptor tableDescriptor, + final byte[][] splitKeys, final long nonceGroup, final long nonce) { CreateTableRequest.Builder builder = CreateTableRequest.newBuilder(); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDescriptor)); if (splitKeys != null) { - for(byte[] key : splitKeys) { + for (byte[] key : splitKeys) { builder.addSplitKeys(UnsafeByteOperations.unsafeWrap(key)); } } @@ -1441,16 +1371,12 @@ public static CreateTableRequest buildCreateTableRequest( /** * Creates a protocol buffer ModifyTableRequest - * * @param tableName * @param tableDesc * @return a ModifyTableRequest */ - public static ModifyTableRequest buildModifyTableRequest( - final TableName tableName, - final TableDescriptor tableDesc, - final long nonceGroup, - final long nonce) { + public static ModifyTableRequest buildModifyTableRequest(final TableName tableName, + final TableDescriptor tableDesc, final long nonceGroup, final long nonce) { ModifyTableRequest.Builder builder = ModifyTableRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setTableSchema(ProtobufUtil.toTableSchema(tableDesc)); @@ -1460,9 +1386,9 @@ public static ModifyTableRequest buildModifyTableRequest( } public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTrackerRequest( - final TableName tableName, final String dstSFT, final long nonceGroup, final long nonce) { + final TableName tableName, final String dstSFT, final long nonceGroup, final long nonce) { ModifyTableStoreFileTrackerRequest.Builder builder = - ModifyTableStoreFileTrackerRequest.newBuilder(); + ModifyTableStoreFileTrackerRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); builder.setDstSft(dstSFT); builder.setNonceGroup(nonceGroup); @@ -1472,12 +1398,11 @@ public static ModifyTableStoreFileTrackerRequest buildModifyTableStoreFileTracke /** * Creates a protocol buffer GetSchemaAlterStatusRequest - * * @param tableName * @return a GetSchemaAlterStatusRequest */ - public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest( - final TableName tableName) { + public static GetSchemaAlterStatusRequest + buildGetSchemaAlterStatusRequest(final TableName tableName) { GetSchemaAlterStatusRequest.Builder builder = GetSchemaAlterStatusRequest.newBuilder(); builder.setTableName(ProtobufUtil.toProtoTableName((tableName))); return builder.build(); @@ -1485,12 +1410,11 @@ public static GetSchemaAlterStatusRequest buildGetSchemaAlterStatusRequest( /** * Creates a protocol buffer GetTableDescriptorsRequest - * * @param tableNames * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final List tableNames) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final List tableNames) { GetTableDescriptorsRequest.Builder builder = GetTableDescriptorsRequest.newBuilder(); if (tableNames != null) { for (TableName tableName : tableNames) { @@ -1502,7 +1426,6 @@ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( /** * Creates a protocol buffer GetTableDescriptorsRequest - * * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableDescriptorsRequest @@ -1519,7 +1442,6 @@ public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest(final P /** * Creates a 
protocol buffer GetTableNamesRequest - * * @param pattern The compiled regular expression to match against * @param includeSysTables False to match only against userspace tables * @return a GetTableNamesRequest @@ -1536,15 +1458,12 @@ public static GetTableNamesRequest buildGetTableNamesRequest(final Pattern patte /** * Creates a protocol buffer GetTableStateRequest - * * @param tableName table to get request for * @return a GetTableStateRequest */ - public static GetTableStateRequest buildGetTableStateRequest( - final TableName tableName) { - return GetTableStateRequest.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName(tableName)) - .build(); + public static GetTableStateRequest buildGetTableStateRequest(final TableName tableName) { + return GetTableStateRequest.newBuilder().setTableName(ProtobufUtil.toProtoTableName(tableName)) + .build(); } /** @@ -1563,7 +1482,7 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T * @return a SetRegionStateInMetaRequest */ public static SetRegionStateInMetaRequest - buildSetRegionStateInMetaRequest(Map nameOrEncodedName2State) { + buildSetRegionStateInMetaRequest(Map nameOrEncodedName2State) { SetRegionStateInMetaRequest.Builder builder = SetRegionStateInMetaRequest.newBuilder(); nameOrEncodedName2State.forEach((name, state) -> { byte[] bytes = Bytes.toBytes(name); @@ -1574,27 +1493,24 @@ public static SetTableStateInMetaRequest buildSetTableStateInMetaRequest(final T spec = buildRegionSpecifier(RegionSpecifierType.REGION_NAME, bytes); } builder.addStates(RegionSpecifierAndState.newBuilder().setRegionSpecifier(spec) - .setState(state.convert()).build()); + .setState(state.convert()).build()); }); return builder.build(); } /** * Creates a protocol buffer GetTableDescriptorsRequest for a single table - * * @param tableName the table name * @return a GetTableDescriptorsRequest */ - public static GetTableDescriptorsRequest buildGetTableDescriptorsRequest( - final TableName tableName) { + public static GetTableDescriptorsRequest + buildGetTableDescriptorsRequest(final TableName tableName) { return GetTableDescriptorsRequest.newBuilder() - .addTableNames(ProtobufUtil.toProtoTableName(tableName)) - .build(); + .addTableNames(ProtobufUtil.toProtoTableName(tableName)).build(); } /** * Creates a protocol buffer IsMasterRunningRequest - * * @return a IsMasterRunningRequest */ public static IsMasterRunningRequest buildIsMasterRunningRequest() { @@ -1603,20 +1519,17 @@ public static IsMasterRunningRequest buildIsMasterRunningRequest() { /** * Creates a protocol buffer SetBalancerRunningRequest - * * @param on * @param synchronous * @return a SetBalancerRunningRequest */ - public static SetBalancerRunningRequest buildSetBalancerRunningRequest( - boolean on, + public static SetBalancerRunningRequest buildSetBalancerRunningRequest(boolean on, boolean synchronous) { return SetBalancerRunningRequest.newBuilder().setOn(on).setSynchronous(synchronous).build(); } /** * Creates a protocol buffer IsBalancerEnabledRequest - * * @return a IsBalancerEnabledRequest */ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { @@ -1625,35 +1538,30 @@ public static IsBalancerEnabledRequest buildIsBalancerEnabledRequest() { /** * Creates a protocol buffer ClearRegionBlockCacheRequest - * * @return a ClearRegionBlockCacheRequest */ public static ClearRegionBlockCacheRequest buildClearRegionBlockCacheRequest(List hris) { ClearRegionBlockCacheRequest.Builder builder = ClearRegionBlockCacheRequest.newBuilder(); - hris.forEach( - hri 
-> builder.addRegion( - buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()) - )); + hris.forEach(hri -> builder + .addRegion(buildRegionSpecifier(RegionSpecifierType.REGION_NAME, hri.getRegionName()))); return builder.build(); } /** * Creates a protocol buffer GetClusterStatusRequest - * * @return A GetClusterStatusRequest */ public static GetClusterStatusRequest buildGetClusterStatusRequest(EnumSet

- * In case the size of the pool is set to a non-zero positive number, that is - * used to cap the number of resources that a pool may contain for any given - * key. A size of {@link Integer#MAX_VALUE} is interpreted as an unbounded pool. + * In case the size of the pool is set to a non-zero positive number, that is used to cap the number + * of resources that a pool may contain for any given key. A size of {@link Integer#MAX_VALUE} is + * interpreted as an unbounded pool. * - * * - * PoolMap is thread-safe. It does not remove elements automatically. Unused resources - * must be closed and removed explicitly. + * PoolMap is thread-safe. It does not remove elements automatically. Unused resources must be + * closed and removed explicitly. *
    - * - * @param - * the type of the key to the resource - * @param - * the type of the resource being pooled + * @param the type of the key to the resource + * @param the type of the resource being pooled */ @InterfaceAudience.Private public class PoolMap { @@ -58,32 +49,33 @@ public class PoolMap { private final PoolType poolType; private final int poolMaxSize; - public PoolMap(PoolType poolType, int poolMaxSize) { - pools = new HashMap<>(); - this.poolType = poolType; - this.poolMaxSize = poolMaxSize; + public PoolMap(PoolType poolType, int poolMaxSize) { + pools = new HashMap<>(); + this.poolType = poolType; + this.poolMaxSize = poolMaxSize; } public V getOrCreate(K key, PoolResourceSupplier supplier) throws IOException { - synchronized (pools) { - Pool pool = pools.get(key); - - if (pool == null) { - pool = createPool(); - pools.put(key, pool); - } - - try { - return pool.getOrCreate(supplier); - } catch (IOException | RuntimeException | Error e) { - if (pool.size() == 0) { - pools.remove(key); - } - - throw e; - } - } + synchronized (pools) { + Pool pool = pools.get(key); + + if (pool == null) { + pool = createPool(); + pools.put(key, pool); + } + + try { + return pool.getOrCreate(supplier); + } catch (IOException | RuntimeException | Error e) { + if (pool.size() == 0) { + pools.remove(key); + } + + throw e; + } + } } + public boolean remove(K key, V value) { synchronized (pools) { Pool pool = pools.get(key); @@ -128,7 +120,7 @@ public void clear() { } public interface PoolResourceSupplier { - R get() throws IOException; + R get() throws IOException; } protected static V createResource(PoolResourceSupplier supplier) throws IOException { @@ -172,30 +164,25 @@ public static PoolType fuzzyMatch(String name) { protected Pool createPool() { switch (poolType) { - case RoundRobin: - return new RoundRobinPool<>(poolMaxSize); - case ThreadLocal: - return new ThreadLocalPool<>(); - default: - return new RoundRobinPool<>(poolMaxSize); + case RoundRobin: + return new RoundRobinPool<>(poolMaxSize); + case ThreadLocal: + return new ThreadLocalPool<>(); + default: + return new RoundRobinPool<>(poolMaxSize); } } /** - * The RoundRobinPool represents a {@link PoolMap.Pool}, which - * stores its resources in an {@link ArrayList}. It load-balances access to - * its resources by returning a different resource every time a given key is - * looked up. - * + * The RoundRobinPool represents a {@link PoolMap.Pool}, which stores its resources + * in an {@link ArrayList}. It load-balances access to its resources by returning a different + * resource every time a given key is looked up. *
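A minimal usage sketch of the PoolMap API reformatted in this hunk, assuming only the signatures visible here (PoolMap is an IA.Private internal class; the key and resource types are illustrative):

import java.io.IOException;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.hbase.util.PoolMap.PoolType; // assumed nested enum, as shown in this file

public class PoolMapSketch {
  public static void main(String[] args) throws IOException {
    // At most 4 resources per key, handed out round-robin on each lookup.
    PoolMap<String, StringBuilder> pool = new PoolMap<>(PoolType.RoundRobin, 4);
    StringBuilder resource = pool.getOrCreate("connection-1", StringBuilder::new);
    resource.append("in use");
    // PoolMap never evicts on its own: unused resources must be removed (and closed) explicitly.
    pool.remove("connection-1", resource);
  }
}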

- * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of - * the pool is unbounded. Otherwise, it caps the number of resources in this - * pool to the (non-zero positive) value specified in {@link #maxSize}. + * If {@link #maxSize} is set to {@link Integer#MAX_VALUE}, then the size of the pool is + * unbounded. Otherwise, it caps the number of resources in this pool to the (non-zero positive) + * value specified in {@link #maxSize}. *
    - * - * @param - * the type of the resource - * + * @param the type of the resource */ @SuppressWarnings("serial") static class RoundRobinPool implements Pool { @@ -254,18 +241,15 @@ public int size() { } /** - * The ThreadLocalPool represents a {@link PoolMap.Pool} that - * works similarly to {@link ThreadLocal} class. It essentially binds the resource - * to the thread from which it is accessed. It doesn't remove resources when a thread exits, - * those resources must be closed manually. - * + * The ThreadLocalPool represents a {@link PoolMap.Pool} that works similarly to + * {@link ThreadLocal} class. It essentially binds the resource to the thread from which it is + * accessed. It doesn't remove resources when a thread exits, those resources must be closed + * manually. *

- * Note that the size of the pool is essentially bounded by the number of threads - * that add resources to this pool. + * Note that the size of the pool is essentially bounded by the number of threads that add + * resources to this pool. *
    - * - * @param - * the type of the resource + * @param the type of the resource */ static class ThreadLocalPool implements Pool { private final Map resources; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java index 698330acc921..4143df9b75d9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/Writables.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,10 +24,9 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.DataInputBuffer; import org.apache.hadoop.io.Writable; +import org.apache.yetus.audience.InterfaceAudience; /** * Utility class with methods for manipulating Writable objects @@ -38,11 +36,11 @@ public class Writables { /** * @param w writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e * @see #getWritable(byte[], Writable) */ - public static byte [] getBytes(final Writable w) throws IOException { + public static byte[] getBytes(final Writable w) throws IOException { if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); } @@ -64,20 +62,20 @@ public class Writables { * Put a bunch of Writables as bytes all into the one byte array. * @param ws writable * @return The bytes of w gotten by running its - * {@link Writable#write(java.io.DataOutput)} method. + * {@link Writable#write(java.io.DataOutput)} method. * @throws IOException e */ - public static byte [] getBytes(final Writable... ws) throws IOException { - List bytes = new ArrayList<>(ws.length); + public static byte[] getBytes(final Writable... ws) throws IOException { + List bytes = new ArrayList<>(ws.length); int size = 0; - for (Writable w: ws) { - byte [] b = getBytes(w); + for (Writable w : ws) { + byte[] b = getBytes(w); size += b.length; bytes.add(b); } - byte [] result = new byte[size]; + byte[] result = new byte[size]; int offset = 0; - for (byte [] b: bytes) { + for (byte[] b : bytes) { System.arraycopy(b, 0, result, offset, b.length); offset += b.length; } @@ -88,16 +86,14 @@ public class Writables { * Set bytes into the passed Writable by calling its * {@link Writable#readFields(java.io.DataInput)}. * @param bytes serialized bytes - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. 
* @throws IOException e * @throws IllegalArgumentException */ - public static Writable getWritable(final byte [] bytes, final Writable w) - throws IOException { + public static Writable getWritable(final byte[] bytes, final Writable w) throws IOException { return getWritable(bytes, 0, bytes.length, w); } @@ -107,20 +103,17 @@ public static Writable getWritable(final byte [] bytes, final Writable w) * @param bytes serialized bytes * @param offset offset into array * @param length length of data - * @param w An empty Writable (usually made by calling the null-arg - * constructor). - * @return The passed Writable after its readFields has been called fed - * by the passed bytes array or IllegalArgumentException - * if passed null or an empty bytes array. + * @param w An empty Writable (usually made by calling the null-arg constructor). + * @return The passed Writable after its readFields has been called fed by the passed + * bytes array or IllegalArgumentException if passed null or an empty + * bytes array. * @throws IOException e * @throws IllegalArgumentException */ - public static Writable getWritable(final byte [] bytes, final int offset, - final int length, final Writable w) - throws IOException { - if (bytes == null || length <=0) { - throw new IllegalArgumentException("Can't build a writable with empty " + - "bytes array"); + public static Writable getWritable(final byte[] bytes, final int offset, final int length, + final Writable w) throws IOException { + if (bytes == null || length <= 0) { + throw new IllegalArgumentException("Can't build a writable with empty " + "bytes array"); } if (w == null) { throw new IllegalArgumentException("Writable cannot be null"); @@ -136,26 +129,24 @@ public static Writable getWritable(final byte [] bytes, final int offset, } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param src Source Writable * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final Writable src, final Writable tgt) - throws IOException { + public static Writable copyWritable(final Writable src, final Writable tgt) throws IOException { return copyWritable(getBytes(src), tgt); } /** - * Copy one Writable to another. Copies bytes using data streams. + * Copy one Writable to another. Copies bytes using data streams. * @param bytes Source Writable * @param tgt Target Writable * @return The target Writable. * @throws IOException e */ - public static Writable copyWritable(final byte [] bytes, final Writable tgt) - throws IOException { + public static Writable copyWritable(final byte[] bytes, final Writable tgt) throws IOException { DataInputStream dis = new DataInputStream(new ByteArrayInputStream(bytes)); try { tgt.readFields(dis); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java index 0447e31fdd09..59834b26961a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ReadOnlyZKClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
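A minimal round-trip sketch of the Writables helpers touched in the hunk above (getBytes serializes through Writable.write, getWritable refills an empty instance through readFields), using org.apache.hadoop.io.Text as the Writable; purely illustrative:

import java.io.IOException;
import org.apache.hadoop.hbase.util.Writables;
import org.apache.hadoop.io.Text;

public class WritablesSketch {
  public static void main(String[] args) throws IOException {
    byte[] bytes = Writables.getBytes(new Text("hello"));        // Writable -> bytes
    Text copy = (Text) Writables.getWritable(bytes, new Text()); // bytes -> empty target instance
    System.out.println(copy);                                    // prints "hello"
  }
}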
See the NOTICE file * distributed with this work for additional information @@ -137,8 +137,8 @@ public ReadOnlyZKClient(Configuration conf) { conf.getInt(RECOVERY_RETRY_INTERVAL_MILLIS, DEFAULT_RECOVERY_RETRY_INTERVAL_MILLIS); this.keepAliveTimeMs = conf.getInt(KEEPALIVE_MILLIS, DEFAULT_KEEPALIVE_MILLIS); LOG.debug( - "Connect {} to {} with session timeout={}ms, retries {}, " + - "retry interval {}ms, keepAlive={}ms", + "Connect {} to {} with session timeout={}ms, retries {}, " + + "retry interval {}ms, keepAlive={}ms", getId(), connectString, sessionTimeoutMs, maxRetries, retryIntervalMs, keepAliveTimeMs); Threads.setDaemonThreadRunning(new Thread(this::run), "ReadOnlyZKClient-" + connectString + "@" + getId()); @@ -260,8 +260,8 @@ public CompletableFuture get(String path) { @Override protected void doExec(ZooKeeper zk) { - zk.getData(path, false, - (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), null); + zk.getData(path, false, (rc, path, ctx, data, stat) -> onComplete(zk, rc, data, true), + null); } }); return future; @@ -311,7 +311,8 @@ private void closeZk() { private ZooKeeper getZk() throws IOException { // may be closed when session expired if (zookeeper == null || !zookeeper.getState().isAlive()) { - zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); } return zookeeper; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java index 5072706cb5ae..f0fae958a66a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKMetadata.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java index 1affd9e627b9..bb4b1fee7ff5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZNodePaths.java @@ -43,9 +43,8 @@ public class ZNodePaths { public final String baseZNode; /** - * The prefix of meta znode. Does not include baseZNode. - * Its a 'prefix' because meta replica id integer can be tagged on the end (if - * no number present, it is 'default' replica). + * The prefix of meta znode. Does not include baseZNode. Its a 'prefix' because meta replica id + * integer can be tagged on the end (if no number present, it is 'default' replica). 
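A minimal sketch of the "prefix plus optional replica id" znode naming described above; the parsing itself is getMetaReplicaIdFromZNode() later in this hunk. The prefix literal and default replica id below are assumptions for illustration only:

public class MetaZNodeSketch {
  static final String META_PREFIX = "meta-region-server"; // assumed default prefix
  static final int DEFAULT_REPLICA_ID = 0;                // assumed default replica id

  /** "meta-region-server" -> default replica; "meta-region-server-2" -> replica 2. */
  static int replicaIdOf(String znode) {
    return znode.equals(META_PREFIX)
      ? DEFAULT_REPLICA_ID
      : Integer.parseInt(znode.substring(META_PREFIX.length() + 1));
  }

  public static void main(String[] args) {
    System.out.println(replicaIdOf("meta-region-server"));   // 0
    System.out.println(replicaIdOf("meta-region-server-2")); // 2
  }
}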
*/ private final String metaZNodePrefix; @@ -117,31 +116,24 @@ public ZNodePaths(Configuration conf) { hfileRefsZNode = joinZNode(replicationZNode, conf.get("zookeeper.znode.replication.hfile.refs", "hfile-refs")); snapshotCleanupZNode = joinZNode(baseZNode, - conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); + conf.get("zookeeper.znode.snapshot.cleanup", DEFAULT_SNAPSHOT_CLEANUP_ZNODE)); } @Override public String toString() { - return new StringBuilder() - .append("ZNodePaths [baseZNode=").append(baseZNode) - .append(", rsZNode=").append(rsZNode) - .append(", drainingZNode=").append(drainingZNode) + return new StringBuilder().append("ZNodePaths [baseZNode=").append(baseZNode) + .append(", rsZNode=").append(rsZNode).append(", drainingZNode=").append(drainingZNode) .append(", masterAddressZNode=").append(masterAddressZNode) .append(", backupMasterAddressesZNode=").append(backupMasterAddressesZNode) - .append(", clusterStateZNode=").append(clusterStateZNode) - .append(", tableZNode=").append(tableZNode) - .append(", clusterIdZNode=").append(clusterIdZNode) - .append(", splitLogZNode=").append(splitLogZNode) - .append(", balancerZNode=").append(balancerZNode) - .append(", regionNormalizerZNode=").append(regionNormalizerZNode) - .append(", switchZNode=").append(switchZNode) - .append(", namespaceZNode=").append(namespaceZNode) - .append(", masterMaintZNode=").append(masterMaintZNode) - .append(", replicationZNode=").append(replicationZNode) - .append(", peersZNode=").append(peersZNode) - .append(", queuesZNode=").append(queuesZNode) - .append(", hfileRefsZNode=").append(hfileRefsZNode) - .append(", snapshotCleanupZNode=").append(snapshotCleanupZNode) + .append(", clusterStateZNode=").append(clusterStateZNode).append(", tableZNode=") + .append(tableZNode).append(", clusterIdZNode=").append(clusterIdZNode) + .append(", splitLogZNode=").append(splitLogZNode).append(", balancerZNode=") + .append(balancerZNode).append(", regionNormalizerZNode=").append(regionNormalizerZNode) + .append(", switchZNode=").append(switchZNode).append(", namespaceZNode=") + .append(namespaceZNode).append(", masterMaintZNode=").append(masterMaintZNode) + .append(", replicationZNode=").append(replicationZNode).append(", peersZNode=") + .append(peersZNode).append(", queuesZNode=").append(queuesZNode).append(", hfileRefsZNode=") + .append(hfileRefsZNode).append(", snapshotCleanupZNode=").append(snapshotCleanupZNode) .append("]").toString(); } @@ -174,9 +166,8 @@ public int getMetaReplicaIdFromPath(String path) { * @return replicaId */ public int getMetaReplicaIdFromZNode(String znode) { - return znode.equals(metaZNodePrefix)? - RegionInfo.DEFAULT_REPLICA_ID: - Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); + return znode.equals(metaZNodePrefix) ? RegionInfo.DEFAULT_REPLICA_ID + : Integer.parseInt(znode.substring(metaZNodePrefix.length() + 1)); } /** @@ -202,10 +193,10 @@ public boolean isClientReadable(String path) { // Developer notice: These znodes are world readable. DO NOT add more znodes here UNLESS // all clients need to access this data to work. Using zk for sharing data to clients (other // than service lookup case is not a recommended design pattern. 
- return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) || - path.equals(clusterIdZNode) || path.equals(rsZNode) || - // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not - path.equals(tableZNode) || path.startsWith(tableZNode + "/"); + return path.equals(baseZNode) || isMetaZNodePath(path) || path.equals(masterAddressZNode) + || path.equals(clusterIdZNode) || path.equals(rsZNode) || + // /hbase/table and /hbase/table/foo is allowed, /hbase/table-lock is not + path.equals(tableZNode) || path.startsWith(tableZNode + "/"); } public String getRsPath(ServerName sn) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java index dd26ed5f2091..6f1fd7f18635 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZooKeeperHelper.java @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.ZooKeeper; +import org.apache.hbase.thirdparty.com.google.common.base.Stopwatch; /** * Methods that help working with ZooKeeper @@ -39,11 +38,12 @@ private ZooKeeperHelper() { /** * Get a ZooKeeper instance and wait until it connected before returning. * @param sessionTimeoutMs Used as session timeout passed to the created ZooKeeper AND as the - * timeout to wait on connection establishment. + * timeout to wait on connection establishment. */ public static ZooKeeper getConnectedZooKeeper(String connectString, int sessionTimeoutMs) throws IOException { - ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> {}); + ZooKeeper zookeeper = new ZooKeeper(connectString, sessionTimeoutMs, e -> { + }); return ensureConnectedZooKeeper(zookeeper, sessionTimeoutMs); } @@ -58,12 +58,11 @@ public static ZooKeeper ensureConnectedZooKeeper(ZooKeeper zookeeper, int timeou } Stopwatch stopWatch = Stopwatch.createStarted(); // Make sure we are connected before we hand it back. - while(!zookeeper.getState().isConnected()) { + while (!zookeeper.getState().isConnected()) { Threads.sleep(1); if (stopWatch.elapsed(TimeUnit.MILLISECONDS) > timeout) { - throw new ZooKeeperConnectionException("Failed connect after waiting " + - stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + - zookeeper); + throw new ZooKeeperConnectionException("Failed connect after waiting " + + stopWatch.elapsed(TimeUnit.MILLISECONDS) + "ms (zk session timeout); " + zookeeper); } } return zookeeper; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java index e1b678574043..5164186e84ec 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,11 +40,10 @@ /** * Tests the HColumnDescriptor with appropriate arguments. 
- * * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 together with - * {@link HColumnDescriptor}. + * {@link HColumnDescriptor}. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) @Deprecated public class TestHColumnDescriptor { @@ -54,14 +53,12 @@ public class TestHColumnDescriptor { @Rule public ExpectedException expectedEx = ExpectedException.none(); + @Test public void testPb() throws DeserializationException { - HColumnDescriptor hcd = new HColumnDescriptor( - new HColumnDescriptor(HConstants.CATALOG_FAMILY) - .setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.NONE) - .setCacheDataInL1(true)); + HColumnDescriptor hcd = new HColumnDescriptor(new HColumnDescriptor(HConstants.CATALOG_FAMILY) + .setInMemory(true).setScope(HConstants.REPLICATION_SCOPE_LOCAL) + .setBloomFilterType(BloomType.NONE).setCacheDataInL1(true)); final int v = 123; hcd.setBlocksize(v); hcd.setTimeToLive(v); @@ -82,7 +79,7 @@ public void testPb() throws DeserializationException { hcd.setMobThreshold(1000L); hcd.setDFSReplication((short) v); - byte [] bytes = hcd.toByteArray(); + byte[] bytes = hcd.toByteArray(); HColumnDescriptor deserializedHcd = HColumnDescriptor.parseFrom(bytes); assertTrue(hcd.equals(deserializedHcd)); assertEquals(v, hcd.getBlocksize()); @@ -134,11 +131,11 @@ public void testMobValuesInHColumnDescriptorShouldReadable() { // We unify the format of all values saved in the descriptor. // Each value is stored as bytes of string. String isMobString = PrettyPrinter.format(String.valueOf(isMob), - HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); + HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); String thresholdString = PrettyPrinter.format(String.valueOf(threshold), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); assertEquals(String.valueOf(isMob), isMobString); assertEquals(String.valueOf(threshold), thresholdString); assertEquals(String.valueOf(policy), policyString); @@ -146,16 +143,11 @@ public void testMobValuesInHColumnDescriptorShouldReadable() { @Test public void testClassMethodsAreBuilderStyle() { - /* HColumnDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * HColumnDescriptor hcd - * = new HColumnDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * HColumnDescriptor should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . 
For example: HColumnDescriptor hcd = new HColumnDescriptor() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(HColumnDescriptor.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 94d05f77283c..6526cbc454a9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,11 +41,10 @@ /** * Test setting values in the descriptor - * * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 together with * {@link HTableDescriptor}. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) @Deprecated public class TestHTableDescriptor { @@ -58,7 +57,7 @@ public class TestHTableDescriptor { @Rule public TestName name = new TestName(); - @Test (expected=IOException.class) + @Test(expected = IOException.class) public void testAddCoprocessorTwice() throws IOException { HTableDescriptor htd = new HTableDescriptor(TableName.META_TABLE_NAME); String cpName = "a.b.c.d"; @@ -110,7 +109,7 @@ public void testPb() throws DeserializationException, IOException { htd.setDurability(Durability.ASYNC_WAL); htd.setReadOnly(true); htd.setRegionReplication(2); - byte [] bytes = htd.toByteArray(); + byte[] bytes = htd.toByteArray(); HTableDescriptor deserializedHtd = HTableDescriptor.parseFrom(bytes); assertEquals(htd, deserializedHtd); assertEquals(v, deserializedHtd.getMaxFileSize()); @@ -121,7 +120,6 @@ public void testPb() throws DeserializationException, IOException { /** * Test cps in the table description. - * * @throws Exception if adding a coprocessor fails */ @Test @@ -139,7 +137,6 @@ public void testGetSetRemoveCP() throws Exception { /** * Test cps in the table description. 
- * * @throws Exception if adding a coprocessor fails */ @Test @@ -196,15 +193,15 @@ public void testAddGetRemoveString() { } String[] legalTableNames = { "foo", "with-dash_under.dot", "_under_start_ok", - "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", - "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", - "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02", - "汉", "汉:字", "_字_", "foo:字", "foo.字", "字.foo"}; + "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", + "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", + "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02", "汉", + "汉:字", "_字_", "foo:字", "foo.字", "字.foo" }; // Avoiding "zookeeper" in here as it's tough to encode in regex String[] illegalTableNames = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok", - "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", - "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2", String.valueOf((char)130), - String.valueOf((char)5), String.valueOf((char)65530)}; + "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", + "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2", String.valueOf((char) 130), + String.valueOf((char) 5), String.valueOf((char) 65530) }; @Test public void testLegalHTableNames() { @@ -241,8 +238,8 @@ public void testIllegalZooKeeperName() { public void testLegalHTableNamesRegex() { for (String tn : legalTableNames) { TableName tName = TableName.valueOf(tn); - assertTrue("Testing: '" + tn + "'", Pattern.matches(TableName.VALID_USER_TABLE_REGEX, - tName.getNameAsString())); + assertTrue("Testing: '" + tn + "'", + Pattern.matches(TableName.VALID_USER_TABLE_REGEX, tName.getNameAsString())); } } @@ -254,7 +251,7 @@ public void testIllegalHTableNamesRegex() { } } - /** + /** * Test default value handling for maxFileSize */ @Test @@ -292,16 +289,11 @@ public void testAddGetRemoveConfiguration() throws Exception { @Test public void testClassMethodsAreBuilderStyle() { - /* HTableDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * HTableDescriptor htd - * = new HTableDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * HTableDescriptor should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . 
For example: HTableDescriptor htd = new HTableDescriptor() .setFoo(foo) + * .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" returns the + * declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(HTableDescriptor.class); @@ -325,7 +317,7 @@ public void testModifyFamily() { assertEquals(1, htd.getFamily(familyName).getDFSReplication()); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testModifyInexistentFamily() { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); byte[] familyName = Bytes.toBytes("cf"); @@ -333,7 +325,7 @@ public void testModifyInexistentFamily() { htd.modifyFamily(hcd); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testAddDuplicateFamilies() { HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name.getMethodName())); byte[] familyName = Bytes.toBytes("cf"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java index f43ce4a52bb5..3dd8f66a1856 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestRegionLocations.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRegionLocations { @ClassRule @@ -56,7 +56,7 @@ public void testSizeMethods() { assertEquals(0, list.size()); assertEquals(0, list.numNonNullElements()); - list = hrll((HRegionLocation)null); + list = hrll((HRegionLocation) null); assertTrue(list.isEmpty()); assertEquals(1, list.size()); assertEquals(0, list.numNonNullElements()); @@ -99,7 +99,7 @@ private HRegionLocation hrl(HRegionInfo hri, ServerName sn, long seqNum) { return new HRegionLocation(hri, sn, seqNum); } - private RegionLocations hrll(HRegionLocation ... locations) { + private RegionLocations hrll(HRegionLocation... locations) { return new RegionLocations(locations); } @@ -168,7 +168,6 @@ public void testRemove() { assertNull(list.getRegionLocation(5)); assertNull(list.getRegionLocation(9)); - // test multi-element remove from multi element list list = hrll(hrl(info0, sn1), hrl(info1, sn1), hrl(info2, sn0), hrl(info9, sn0)); list = list.remove(hrl(info9, sn0)); @@ -334,12 +333,11 @@ public void testUpdateLocationWithDifferentRegionId() { assertEquals(3, list.size()); } - @Test public void testConstructWithNullElements() { // RegionLocations can contain null elements as well. 
These null elements can - RegionLocations list = new RegionLocations((HRegionLocation)null); + RegionLocations list = new RegionLocations((HRegionLocation) null); assertTrue(list.isEmpty()); assertEquals(1, list.size()); assertEquals(0, list.numNonNullElements()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java index 0291bb79122a..9357b13b4c5c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/RpcRetryingCallerImplTest.java @@ -1,19 +1,19 @@ /* - Licensed to the Apache Software Foundation (ASF) under one - or more contributor license agreements. See the NOTICE file - distributed with this work for additional information - regarding copyright ownership. The ASF licenses this file - to you under the Apache License, Version 2.0 (the - "License"); you may not use this file except in compliance - with the License. You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.client; @@ -27,24 +27,24 @@ Licensed to the Apache Software Foundation (ASF) under one import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; -@Category({ ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class RpcRetryingCallerImplTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(RpcRetryingCallerImplTest.class); + HBaseClassTestRule.forClass(RpcRetryingCallerImplTest.class); @Test public void itTranslatesRemoteExceptionFromServiceException() throws DoNotRetryIOException { String message = "CDE for test"; ServiceException exception = new ServiceException( - new RemoteWithExtrasException(CallDroppedException.class.getName(), message, false)); + new RemoteWithExtrasException(CallDroppedException.class.getName(), message, false)); Throwable result = RpcRetryingCallerImpl.translateException(exception); - Assert.assertTrue("Expect unwrap CallDroppedException", - result instanceof CallDroppedException); + Assert.assertTrue("Expect unwrap CallDroppedException", result instanceof CallDroppedException); Assert.assertEquals(message, result.getMessage()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java index 44670dd9538d..7c34faf3fb00 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAdminRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -73,7 +73,7 @@ public class TestAsyncAdminRpcPriority { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncAdminRpcPriority.class); + HBaseClassTestRule.forClass(TestAsyncAdminRpcPriority.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -96,7 +96,7 @@ public void setUp() throws IOException { public Void answer(InvocationOnMock invocation) throws Throwable { RpcCallback done = invocation.getArgument(2); done.run(GetProcedureResultResponse.newBuilder() - .setState(GetProcedureResultResponse.State.FINISHED).build()); + .setState(GetProcedureResultResponse.State.FINISHED).build()); return null; } }).when(masterStub).getProcedureResult(any(HBaseRpcController.class), @@ -143,7 +143,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { any()); conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", - UserProvider.instantiate(CONF).getCurrent()) { + UserProvider.instantiate(CONF).getCurrent()) { @Override CompletableFuture getMasterStub() { @@ -170,9 +170,9 @@ public boolean matches(HBaseRpcController controller) { @Test public void testCreateNormalTable() { conn.getAdmin() - .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) - .join(); + .createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) + .join(); verify(masterStub, times(1)).createTable(assertPriority(NORMAL_QOS), any(CreateTableRequest.class), any()); } @@ -182,10 +182,10 @@ public void testCreateNormalTable() { @Test public void testCreateSystemTable() { conn.getAdmin() - .createTable(TableDescriptorBuilder - .newBuilder(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) - .join(); + .createTable(TableDescriptorBuilder + .newBuilder(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()) + .join(); verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS), any(CreateTableRequest.class), any()); } @@ -195,7 +195,7 @@ public void testCreateSystemTable() { @Test public void testCreateMetaTable() { conn.getAdmin().createTable(TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")).build()).join(); verify(masterStub, times(1)).createTable(assertPriority(SYSTEMTABLE_QOS), any(CreateTableRequest.class), any()); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java index b2d5b872e757..7f9a2c2a0a1d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionConfiguration.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ public class TestAsyncConnectionConfiguration { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncConnectionConfiguration.class); + HBaseClassTestRule.forClass(TestAsyncConnectionConfiguration.class); @Test public void testDefaultReadWriteRpcTimeout() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java index 14bbc49f3486..eb034fc22974 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncConnectionTracing.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,12 +48,12 @@ public class TestAsyncConnectionTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncConnectionTracing.class); + HBaseClassTestRule.forClass(TestAsyncConnectionTracing.class); private static Configuration CONF = HBaseConfiguration.create(); private ServerName masterServer = - ServerName.valueOf("localhost", 12345, System.currentTimeMillis()); + ServerName.valueOf("localhost", 12345, System.currentTimeMillis()); private AsyncConnection conn; @@ -70,7 +70,7 @@ public CompletableFuture getActiveMaster() { } }; conn = new AsyncConnectionImpl(CONF, registry, "test", - UserProvider.instantiate(CONF).getCurrent()); + UserProvider.instantiate(CONF).getCurrent()); } @After @@ -81,14 +81,13 @@ public void tearDown() throws IOException { private void assertTrace(String methodName, ServerName serverName) { Waiter.waitFor(CONF, 1000, () -> traceRule.getSpans().stream() - .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) && - span.getKind() == SpanKind.INTERNAL && span.hasEnded())); + .anyMatch(span -> span.getName().equals("AsyncConnection." + methodName) + && span.getKind() == SpanKind.INTERNAL && span.hasEnded())); SpanData data = traceRule.getSpans().stream() - .filter(s -> s.getName().equals("AsyncConnection." + methodName)).findFirst().get(); + .filter(s -> s.getName().equals("AsyncConnection." 
+ methodName)).findFirst().get(); assertEquals(StatusCode.OK, data.getStatus().getStatusCode()); if (serverName != null) { - assertEquals( - serverName.getServerName(), + assertEquals(serverName.getServerName(), data.getAttributes().get(HBaseSemanticAttributes.SERVER_NAME_KEY)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java index b306500c8b13..eca59331cc49 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncMetaRegionLocatorFailFast.java @@ -37,7 +37,7 @@ public class TestAsyncMetaRegionLocatorFailFast { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncMetaRegionLocatorFailFast.class); + HBaseClassTestRule.forClass(TestAsyncMetaRegionLocatorFailFast.class); private static Configuration CONF = HBaseConfiguration.create(); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index 1aa3bb70ab2d..a43f2ce59b74 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,9 +77,10 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; -@Category({ClientTests.class, LargeTests.class}) +@Category({ ClientTests.class, LargeTests.class }) public class TestAsyncProcess { @ClassRule @@ -87,8 +88,7 @@ public class TestAsyncProcess { HBaseClassTestRule.forClass(TestAsyncProcess.class); private static final Logger LOG = LoggerFactory.getLogger(TestAsyncProcess.class); - private static final TableName DUMMY_TABLE = - TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3"); @@ -114,8 +114,8 @@ public class TestAsyncProcess { private static final RegionInfo hri2r1 = RegionReplicaUtil.getRegionInfoForReplica(hri2, 1); private static final RegionLocations hrls1 = new RegionLocations(new HRegionLocation(hri1, sn), new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3)); - private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2), - new HRegionLocation(hri2r1, sn3)); + private static final RegionLocations hrls2 = + new RegionLocations(new HRegionLocation(hri2, sn2), new HRegionLocation(hri2r1, sn3)); private static final RegionLocations hrls3 = new RegionLocations(new HRegionLocation(hri3, sn3), null); @@ -132,24 +132,25 @@ public void beforeEach() { this.CONF = new Configuration(); CONF.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, NB_RETRIES); this.CONNECTION_CONFIG = new ConnectionConfiguration(CONF); - this.RPC_TIMEOUT = 
CONF.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT); + this.RPC_TIMEOUT = + CONF.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY, HConstants.DEFAULT_HBASE_RPC_TIMEOUT); this.OPERATION_TIMEOUT = CONF.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT); } static class CountingThreadFactory implements ThreadFactory { final AtomicInteger nbThreads; ThreadFactory realFactory = - new ThreadFactoryBuilder().setNameFormat("test-TestAsyncProcess-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(); + new ThreadFactoryBuilder().setNameFormat("test-TestAsyncProcess-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build(); + @Override public Thread newThread(Runnable r) { nbThreads.incrementAndGet(); return realFactory.newThread(r); } - CountingThreadFactory(AtomicInteger nbThreads){ + CountingThreadFactory(AtomicInteger nbThreads) { this.nbThreads = nbThreads; } } @@ -163,61 +164,59 @@ static class MyAsyncProcess extends AsyncProcess { private long previousTimeout = -1; final ExecutorService service; + @Override - protected AsyncRequestFutureImpl createAsyncRequestFuture( - AsyncProcessTask task, List actions, long nonceGroup) { + protected AsyncRequestFutureImpl createAsyncRequestFuture(AsyncProcessTask task, + List actions, long nonceGroup) { // Test HTable has tableName of null, so pass DUMMY_TABLE - AsyncProcessTask wrap = new AsyncProcessTask(task){ + AsyncProcessTask wrap = new AsyncProcessTask(task) { @Override public TableName getTableName() { return DUMMY_TABLE; } }; - AsyncRequestFutureImpl r = new MyAsyncRequestFutureImpl<>( - wrap, actions, nonceGroup, this); + AsyncRequestFutureImpl r = + new MyAsyncRequestFutureImpl<>(wrap, actions, nonceGroup, this); allReqs.add(r); return r; } public MyAsyncProcess(ClusterConnection hc, Configuration conf) { - super(hc, conf, - new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); + super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); service = Executors.newFixedThreadPool(5); this.conf = conf; } public MyAsyncProcess(ClusterConnection hc, Configuration conf, AtomicInteger nbThreads) { super(hc, conf, new RpcRetryingCallerFactory(conf), new RpcControllerFactory(conf)); - service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, - new SynchronousQueue<>(), new CountingThreadFactory(nbThreads)); + service = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS, new SynchronousQueue<>(), + new CountingThreadFactory(nbThreads)); } public AsyncRequestFuture submit(ExecutorService pool, TableName tableName, List rows, boolean atLeastOne, Batch.Callback callback, boolean needResults) throws InterruptedIOException { AsyncProcessTask task = AsyncProcessTask.newBuilder(callback) - .setPool(pool == null ? service : pool) - .setTableName(tableName) - .setRowAccess(rows) - .setSubmittedRows(atLeastOne ? SubmittedRows.AT_LEAST_ONE : SubmittedRows.NORMAL) - .setNeedResults(needResults) - .setRpcTimeout(conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, - HConstants.DEFAULT_HBASE_RPC_TIMEOUT)) - .setOperationTimeout(conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, - HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT)) - .build(); + .setPool(pool == null ? service : pool).setTableName(tableName).setRowAccess(rows) + .setSubmittedRows(atLeastOne ? 
SubmittedRows.AT_LEAST_ONE : SubmittedRows.NORMAL) + .setNeedResults(needResults) + .setRpcTimeout(conf.getInt(HConstants.HBASE_RPC_READ_TIMEOUT_KEY, + HConstants.DEFAULT_HBASE_RPC_TIMEOUT)) + .setOperationTimeout(conf.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, + HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT)) + .build(); return submit(task); } - public AsyncRequestFuture submit(TableName tableName, - final List rows, boolean atLeastOne, Batch.Callback callback, - boolean needResults) throws InterruptedIOException { + public AsyncRequestFuture submit(TableName tableName, final List rows, + boolean atLeastOne, Batch.Callback callback, boolean needResults) + throws InterruptedIOException { return submit(null, tableName, rows, atLeastOne, callback, needResults); } @Override public AsyncRequestFuture submit(AsyncProcessTask task) - throws InterruptedIOException { + throws InterruptedIOException { previousTimeout = task.getRpcTimeout(); // We use results in tests to check things, so override to always save them. AsyncProcessTask wrap = new AsyncProcessTask(task) { @@ -230,28 +229,26 @@ public boolean getNeedResults() { } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { callsCt.incrementAndGet(); MultiServerCallable callable1 = (MultiServerCallable) callable; - final MultiResponse mr = createMultiResponse( - callable1.getMulti(), nbMultiResponse, nbActions, - new ResponseGenerator() { - @Override - public void addResponse(MultiResponse mr, byte[] regionName, Action a) { - if (Arrays.equals(FAILS, a.getAction().getRow())) { - mr.add(regionName, a.getOriginalIndex(), failure); - } else { - mr.add(regionName, a.getOriginalIndex(), success); - } + final MultiResponse mr = createMultiResponse(callable1.getMulti(), nbMultiResponse, nbActions, + new ResponseGenerator() { + @Override + public void addResponse(MultiResponse mr, byte[] regionName, Action a) { + if (Arrays.equals(FAILS, a.getAction().getRow())) { + mr.add(regionName, a.getOriginalIndex(), failure); + } else { + mr.add(regionName, a.getOriginalIndex(), success); } - }); + } + }); return new RpcRetryingCallerImpl(100, 500, 10, 9) { @Override public AbstractResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) - throws IOException, RuntimeException { + int callTimeout) throws IOException, RuntimeException { try { // sleep one second in order for threadpool to start another thread instead of reusing // existing one. 
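CountingThreadFactory in this test only wraps a named factory built with the shaded Guava ThreadFactoryBuilder so thread creation can be counted. Outside the test, the same named-pool pattern looks roughly like the sketch below; the 1..20 pool bounds mirror the test and are illustrative:

  import java.util.concurrent.SynchronousQueue;
  import java.util.concurrent.ThreadFactory;
  import java.util.concurrent.ThreadPoolExecutor;
  import java.util.concurrent.TimeUnit;
  import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;

  public class NamedPoolSketch {
    public static void main(String[] args) {
      ThreadFactory factory = new ThreadFactoryBuilder()
        .setNameFormat("test-TestAsyncProcess-pool-%d") // readable names in thread dumps
        .setDaemon(true)
        .build();
      ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 20, 60, TimeUnit.SECONDS,
        new SynchronousQueue<>(), factory);
      pool.execute(() -> System.out.println(Thread.currentThread().getName()));
      pool.shutdown();
    }
  }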
@@ -264,13 +261,13 @@ public AbstractResponse callWithoutRetries(RetryingCallable ca }; } - } static class MyAsyncRequestFutureImpl extends AsyncRequestFutureImpl { private final Map> heapSizesByServer = new HashMap<>(); - public MyAsyncRequestFutureImpl(AsyncProcessTask task, List actions, - long nonceGroup, AsyncProcess asyncProcess) { + + public MyAsyncRequestFutureImpl(AsyncProcessTask task, List actions, long nonceGroup, + AsyncProcess asyncProcess) { super(task, actions, nonceGroup, asyncProcess); } @@ -284,11 +281,10 @@ Map> getRequestHeapSize() { } @Override - SingleServerRequestRunnable createSingleServerRequest( - MultiAction multiAction, int numAttempt, ServerName server, - Set callsInProgress) { - SingleServerRequestRunnable rq = new SingleServerRequestRunnable( - multiAction, numAttempt, server, callsInProgress); + SingleServerRequestRunnable createSingleServerRequest(MultiAction multiAction, int numAttempt, + ServerName server, Set callsInProgress) { + SingleServerRequestRunnable rq = + new SingleServerRequestRunnable(multiAction, numAttempt, server, callsInProgress); List heapCount = heapSizesByServer.get(server); if (heapCount == null) { heapCount = new ArrayList<>(); @@ -299,16 +295,13 @@ SingleServerRequestRunnable createSingleServerRequest( } private long heapSizeOf(MultiAction multiAction) { - return multiAction.actions.values().stream() - .flatMap(v -> v.stream()) - .map(action -> action.getAction()) - .filter(row -> row instanceof Mutation) - .mapToLong(row -> ((Mutation) row).heapSize()) - .sum(); + return multiAction.actions.values().stream().flatMap(v -> v.stream()) + .map(action -> action.getAction()).filter(row -> row instanceof Mutation) + .mapToLong(row -> ((Mutation) row).heapSize()).sum(); } } - static class CallerWithFailure extends RpcRetryingCallerImpl{ + static class CallerWithFailure extends RpcRetryingCallerImpl { private final IOException e; @@ -319,13 +312,11 @@ public CallerWithFailure(IOException e) { @Override public AbstractResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) - throws IOException, RuntimeException { + int callTimeout) throws IOException, RuntimeException { throw e; } } - static class AsyncProcessWithFailure extends MyAsyncProcess { private final IOException ioe; @@ -337,8 +328,8 @@ public AsyncProcessWithFailure(ClusterConnection hc, Configuration conf, IOExcep } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { callsCt.incrementAndGet(); return new CallerWithFailure(ioe); } @@ -349,6 +340,7 @@ protected RpcRetryingCaller createCaller( */ static class MyClientBackoffPolicy implements ClientBackoffPolicy { private final Map count = new HashMap<>(); + @Override public long getBackoffTime(ServerName serverName, byte[] region, ServerStatistics stats) { AtomicInteger inc = count.get(serverName); @@ -390,26 +382,25 @@ public void setCallDelays(long primaryMs, long replicaMs) { } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable payloadCallable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable payloadCallable, int rpcTimeout) { MultiServerCallable callable = (MultiServerCallable) payloadCallable; - final MultiResponse mr = createMultiResponse( - callable.getMulti(), nbMultiResponse, nbActions, new ResponseGenerator() { - @Override - public void 
addResponse(MultiResponse mr, byte[] regionName, Action a) { - if (failures.contains(regionName)) { - mr.add(regionName, a.getOriginalIndex(), failure); - } else { - boolean isStale = !RegionReplicaUtil.isDefaultReplica(a.getReplicaId()); - mr.add(regionName, a.getOriginalIndex(), - Result.create(new Cell[0], null, isStale)); - } + final MultiResponse mr = createMultiResponse(callable.getMulti(), nbMultiResponse, nbActions, + new ResponseGenerator() { + @Override + public void addResponse(MultiResponse mr, byte[] regionName, Action a) { + if (failures.contains(regionName)) { + mr.add(regionName, a.getOriginalIndex(), failure); + } else { + boolean isStale = !RegionReplicaUtil.isDefaultReplica(a.getReplicaId()); + mr.add(regionName, a.getOriginalIndex(), Result.create(new Cell[0], null, isStale)); } - }); + } + }); // Currently AsyncProcess either sends all-replica, or all-primary request. final boolean isDefault = RegionReplicaUtil.isDefaultReplica( - callable.getMulti().actions.values().iterator().next().iterator().next().getReplicaId()); - final ServerName server = ((MultiServerCallable)callable).getServerName(); + callable.getMulti().actions.values().iterator().next().iterator().next().getReplicaId()); + final ServerName server = ((MultiServerCallable) callable).getServerName(); String debugMsg = "Call to " + server + ", primary=" + isDefault + " with " + callable.getMulti().actions.size() + " entries: "; for (byte[] region : callable.getMulti().actions.keySet()) { @@ -423,8 +414,7 @@ public void addResponse(MultiResponse mr, byte[] regionName, Action a) { return new RpcRetryingCallerImpl(100, 500, 10, 9) { @Override public MultiResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) - throws IOException, RuntimeException { + int callTimeout) throws IOException, RuntimeException { long sleep = -1; if (isDefault) { Long customSleep = customPrimarySleepMs.get(server); @@ -444,8 +434,8 @@ public MultiResponse callWithoutRetries(RetryingCallable calla } } - static MultiResponse createMultiResponse(final MultiAction multi, - AtomicInteger nbMultiResponse, AtomicInteger nbActions, ResponseGenerator gen) { + static MultiResponse createMultiResponse(final MultiAction multi, AtomicInteger nbMultiResponse, + AtomicInteger nbActions, ResponseGenerator gen) { final MultiResponse mr = new MultiResponse(); nbMultiResponse.incrementAndGet(); for (Map.Entry> entry : multi.actions.entrySet()) { @@ -485,14 +475,14 @@ protected MyConnectionImpl(Configuration conf) throws IOException { } private static Configuration setupConf(Configuration conf) { - conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - TestRegistry.class, ConnectionRegistry.class); + conf.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, TestRegistry.class, + ConnectionRegistry.class); return conf; } @Override - public RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException { + public RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, + boolean retry, int replicaId) throws IOException { return new RegionLocations(loc1); } @@ -516,10 +506,10 @@ protected MyConnectionImpl2(List hrl, Configuration conf) throw } @Override - public RegionLocations locateRegion(TableName tableName, - byte[] row, boolean useCache, boolean retry, int replicaId) throws IOException { + public RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, + boolean retry, int replicaId) throws 
IOException { int i = 0; - for (HRegionLocation hr : hrl){ + for (HRegionLocation hr : hrl) { if (Arrays.equals(row, hr.getRegionInfo().getStartKey())) { usedRegions[i] = true; return new RegionLocations(hr); @@ -529,6 +519,7 @@ public RegionLocations locateRegion(TableName tableName, return null; } } + @Test public void testListRowAccess() { int count = 10; @@ -564,6 +555,7 @@ public void testListRowAccess() { assertEquals(0, taker.size()); assertEquals(count, takeCount); } + private static long calculateRequestCount(long putSizePerServer, long maxHeapSizePerRequest) { if (putSizePerServer <= maxHeapSizePerRequest) { return 1; @@ -601,8 +593,8 @@ public void testSubmitRandomSizeRequest() throws Exception { } long putsHeapSize = n % limit; long maxHeapSizePerRequest = putsHeapSize / requestCount; - LOG.info("[testSubmitRandomSizeRequest] maxHeapSizePerRequest=" + maxHeapSizePerRequest + - ", putsHeapSize=" + putsHeapSize); + LOG.info("[testSubmitRandomSizeRequest] maxHeapSizePerRequest=" + maxHeapSizePerRequest + + ", putsHeapSize=" + putsHeapSize); doSubmitRequest(maxHeapSizePerRequest, putsHeapSize); } @@ -630,7 +622,7 @@ private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) thro conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - maxHeapSizePerRequest); + maxHeapSizePerRequest); // sn has two regions long putSizeSN = 0; @@ -652,11 +644,9 @@ private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) thro int minCountSnRequest = (int) calculateRequestCount(putSizeSN, maxHeapSizePerRequest); int minCountSn2Request = (int) calculateRequestCount(putSizeSN2, maxHeapSizePerRequest); - LOG.info("Total put count:" + puts.size() + ", putSizeSN:"+ putSizeSN - + ", putSizeSN2:" + putSizeSN2 - + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest - + ", minCountSnRequest:" + minCountSnRequest - + ", minCountSn2Request:" + minCountSn2Request); + LOG.info("Total put count:" + puts.size() + ", putSizeSN:" + putSizeSN + ", putSizeSN2:" + + putSizeSN2 + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest + ", minCountSnRequest:" + + minCountSnRequest + ", minCountSn2Request:" + minCountSn2Request); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); @@ -708,7 +698,7 @@ private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) thro } // restore config. 
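doSubmitRequest tightens two client settings and puts them back at the end (the "restore config." step a few lines down). Pulled out for readability, the save-and-restore shape is roughly the following; the helper name is hypothetical, and placing it in the test's own package is an assumption so the two config-key constants resolve:

  package org.apache.hadoop.hbase.client; // assumed: same package as the test above

  import org.apache.hadoop.conf.Configuration;

  public class RequestControllerConfigSketch {
    // Hypothetical helper: run `body` under a tightened per-request heap limit, then restore.
    static void withTightHeapLimit(Configuration conf, long maxHeapSizePerRequest, Runnable body) {
      String previousController = conf.get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY);
      long previousLimit =
        conf.getLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, -1L);
      conf.set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY,
        SimpleRequestController.class.getName());
      conf.setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE,
        maxHeapSizePerRequest);
      try {
        body.run();
      } finally {
        // Restore, mirroring the test's "restore config." cleanup.
        conf.setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, previousLimit);
        if (previousController != null) {
          conf.set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, previousController);
        }
      }
    }
  }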
conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, - defaultHeapSizePerRequest); + defaultHeapSizePerRequest); if (defaultClazz != null) { conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, defaultClazz); @@ -775,7 +765,6 @@ public void testSubmitBusyRegion() throws Exception { } } - @Test public void testSubmitBusyRegionServer() throws Exception { ClusterConnection conn = createHConnection(); @@ -786,7 +775,7 @@ public void testSubmitBusyRegionServer() throws Exception { SimpleRequestController.class.getName()); SimpleRequestController controller = (SimpleRequestController) ap.requestController; controller.taskCounterPerServer.put(sn2, - new AtomicInteger(controller.maxConcurrentTasksPerServer)); + new AtomicInteger(controller.maxConcurrentTasksPerServer)); List puts = new ArrayList<>(4); puts.add(createPut(1, true)); @@ -798,7 +787,7 @@ public void testSubmitBusyRegionServer() throws Exception { Assert.assertEquals(" puts=" + puts, 1, puts.size()); controller.taskCounterPerServer.put(sn2, - new AtomicInteger(controller.maxConcurrentTasksPerServer - 1)); + new AtomicInteger(controller.maxConcurrentTasksPerServer - 1)); ap.submit(null, DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); if (defaultClazz != null) { @@ -823,16 +812,15 @@ public void testFail() throws Exception { Assert.assertEquals(1, ars.getErrors().exceptions.size()); Assert.assertTrue("was: " + ars.getErrors().exceptions.get(0), - failure.equals(ars.getErrors().exceptions.get(0))); + failure.equals(ars.getErrors().exceptions.get(0))); Assert.assertTrue("was: " + ars.getErrors().exceptions.get(0), - failure.equals(ars.getErrors().exceptions.get(0))); + failure.equals(ars.getErrors().exceptions.get(0))); Assert.assertEquals(1, ars.getFailedOperations().size()); Assert.assertTrue("was: " + ars.getFailedOperations().get(0), - p.equals(ars.getFailedOperations().get(0))); + p.equals(ars.getFailedOperations().get(0))); } - @Test public void testSubmitTrue() throws IOException { ClusterConnection conn = createHConnection(); @@ -849,9 +837,9 @@ public void testSubmitTrue() throws IOException { final AtomicBoolean checkPoint = new AtomicBoolean(false); final AtomicBoolean checkPoint2 = new AtomicBoolean(false); - Thread t = new Thread(){ + Thread t = new Thread() { @Override - public void run(){ + public void run() { Threads.sleep(1000); Assert.assertFalse(checkPoint.get()); // TODO: this is timing-dependent ai.decrementAndGet(); @@ -873,7 +861,7 @@ public void run(){ Assert.assertTrue(puts.isEmpty()); checkPoint.set(true); - while (!checkPoint2.get()){ + while (!checkPoint2.get()) { Threads.sleep(1); } if (defaultClazz != null) { @@ -986,7 +974,6 @@ public void testMaxTask() throws Exception { final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); SimpleRequestController controller = (SimpleRequestController) ap.requestController; - for (int i = 0; i < 1000; i++) { ap.incTaskCounters(Collections.singleton(Bytes.toBytes("dummy")), sn); } @@ -1029,7 +1016,7 @@ public void run() { ap.submit(null, DUMMY_TABLE, new ArrayList<>(), false, null, false); long end = EnvironmentEdgeManager.currentTime(); - //Adds 100 to secure us against approximate timing. + // Adds 100 to secure us against approximate timing. 
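testSubmitTrue above coordinates the main thread and a helper thread with AtomicBoolean checkpoints instead of locks. Stripped of the HBase specifics, the handshake pattern looks roughly like this sketch (names, sleeps, and the helper's work are illustrative, not the test's exact steps):

  import java.util.concurrent.atomic.AtomicBoolean;

  public class CheckpointSketch {
    public static void main(String[] args) throws InterruptedException {
      AtomicBoolean checkPoint = new AtomicBoolean(false);
      AtomicBoolean checkPoint2 = new AtomicBoolean(false);

      Thread helper = new Thread(() -> {
        // Wait until the main thread signals it has finished its submit.
        while (!checkPoint.get()) {
          try {
            Thread.sleep(1);
          } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            return;
          }
        }
        checkPoint2.set(true); // let the main thread proceed
      });
      helper.start();

      // ... main-thread work happens here ...
      checkPoint.set(true);
      while (!checkPoint2.get()) {
        Thread.sleep(1);
      }
      helper.join();
      System.out.println("handshake complete");
    }
  }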
Assert.assertTrue(start + 100L + sleepTime > end); if (defaultClazz != null) { conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, @@ -1042,8 +1029,9 @@ private ClusterConnection createHConnection() throws IOException { setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1)); setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2)); setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3)); - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), - Mockito.anyBoolean())).thenReturn(Arrays.asList(loc1, loc2, loc3)); + Mockito + .when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) + .thenReturn(Arrays.asList(loc1, loc2, loc3)); setMockLocation(hc, FAILS, new RegionLocations(loc2)); return hc; } @@ -1063,17 +1051,18 @@ private ClusterConnection createHConnectionWithReplicas() throws IOException { for (HRegionLocation loc : hrls3.getRegionLocations()) { locations.add(loc); } - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), - Mockito.anyBoolean())).thenReturn(locations); + Mockito + .when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) + .thenReturn(locations); return hc; } - private static void setMockLocation(ClusterConnection hc, byte[] row, - RegionLocations result) throws IOException { - Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), - Mockito.anyBoolean(), Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result); - Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), - Mockito.anyBoolean(), Mockito.anyBoolean())).thenReturn(result); + private static void setMockLocation(ClusterConnection hc, byte[] row, RegionLocations result) + throws IOException { + Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), + Mockito.anyBoolean(), Mockito.anyInt())).thenReturn(result); + Mockito.when(hc.locateRegion(Mockito.eq(DUMMY_TABLE), Mockito.eq(row), Mockito.anyBoolean(), + Mockito.anyBoolean())).thenReturn(result); } private ClusterConnection createHConnectionCommon() { @@ -1096,7 +1085,7 @@ public void testHTablePutSuccess() throws Exception { Put put = createPut(1, true); Assert.assertEquals(conn.getConnectionConfiguration().getWriteBufferSize(), - ht.getWriteBufferSize()); + ht.getWriteBufferSize()); Assert.assertEquals(0, ht.getCurrentWriteBufferSize()); ht.mutate(put); ht.flush(); @@ -1108,25 +1097,17 @@ public void testSettingWriteBufferPeriodicFlushParameters() throws Exception { ClusterConnection conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); - checkPeriodicFlushParameters(conn, ap, - 1234, 1234, - 1234, 1234); - checkPeriodicFlushParameters(conn, ap, - 0, 0, - 0, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); - checkPeriodicFlushParameters(conn, ap, - -1234, 0, - -1234, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); - checkPeriodicFlushParameters(conn, ap, - 1, 1, - 1, BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); - } - - private void checkPeriodicFlushParameters(ClusterConnection conn, - MyAsyncProcess ap, - long setTO, long expectTO, - long setTT, long expectTT - ) { + checkPeriodicFlushParameters(conn, ap, 1234, 1234, 1234, 1234); + checkPeriodicFlushParameters(conn, ap, 0, 0, 0, + BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + checkPeriodicFlushParameters(conn, ap, -1234, 0, -1234, + 
BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + checkPeriodicFlushParameters(conn, ap, 1, 1, 1, + BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS); + } + + private void checkPeriodicFlushParameters(ClusterConnection conn, MyAsyncProcess ap, long setTO, + long expectTO, long setTT, long expectTT) { BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); // The BufferedMutatorParams does nothing with the value @@ -1142,7 +1123,7 @@ private void checkPeriodicFlushParameters(ClusterConnection conn, // The BufferedMutatorImpl corrects illegal values (direct via setter) BufferedMutatorImpl ht2 = - new BufferedMutatorImpl(conn, createBufferedMutatorParams(ap, DUMMY_TABLE), ap); + new BufferedMutatorImpl(conn, createBufferedMutatorParams(ap, DUMMY_TABLE), ap); ht2.setWriteBufferPeriodicFlush(setTO, setTT); Assert.assertEquals(expectTO, ht2.getWriteBufferPeriodicFlushTimeoutMs()); Assert.assertEquals(expectTT, ht2.getWriteBufferPeriodicFlushTimerTickMs()); @@ -1155,9 +1136,9 @@ public void testWriteBufferPeriodicFlushTimeoutMs() throws Exception { MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); - bufferParam.setWriteBufferPeriodicFlushTimeoutMs(1); // Flush ASAP + bufferParam.setWriteBufferPeriodicFlushTimeoutMs(1); // Flush ASAP bufferParam.setWriteBufferPeriodicFlushTimerTickMs(1); // Check every 100ms - bufferParam.writeBufferSize(10000); // Write buffer set to much larger than the single record + bufferParam.writeBufferSize(10000); // Write buffer set to much larger than the single record BufferedMutatorImpl ht = new BufferedMutatorImpl(conn, bufferParam, ap); @@ -1165,7 +1146,7 @@ public void testWriteBufferPeriodicFlushTimeoutMs() throws Exception { Assert.assertEquals(10000, ht.getWriteBufferSize()); Assert.assertEquals(1, ht.getWriteBufferPeriodicFlushTimeoutMs()); Assert.assertEquals(BufferedMutator.MIN_WRITE_BUFFER_PERIODIC_FLUSH_TIMERTICK_MS, - ht.getWriteBufferPeriodicFlushTimerTickMs()); + ht.getWriteBufferPeriodicFlushTimerTickMs()); Put put = createPut(1, true); @@ -1215,7 +1196,6 @@ public void testWriteBufferPeriodicFlushTimeoutMs() throws Exception { Assert.assertEquals(0, ht.getCurrentWriteBufferSize()); } - @Test public void testBufferedMutatorImplWithSharedPool() throws Exception { ClusterConnection conn = createHConnection(); @@ -1231,8 +1211,8 @@ public void testBufferedMutatorImplWithSharedPool() throws Exception { public void testFailedPutAndNewPut() throws Exception { ClusterConnection conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); - BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE) - .writeBufferSize(0); + BufferedMutatorParams bufferParam = + createBufferedMutatorParams(ap, DUMMY_TABLE).writeBufferSize(0); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap); Put p = createPut(1, false); @@ -1302,6 +1282,7 @@ public void testBatch() throws IOException, InterruptedException { Assert.assertEquals(success, res[5]); Assert.assertEquals(failure, res[6]); } + @Test public void testErrorsServers() throws IOException { Configuration configuration = new Configuration(CONF); @@ -1390,7 +1371,6 @@ public void testErrors() throws IOException { Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); } - @Test public void testCallQueueTooLarge() throws IOException { ClusterConnection conn = new MyConnectionImpl(CONF); @@ -1412,9 +1392,10 @@ public void 
testCallQueueTooLarge() throws IOException { // Checking that the ErrorsServers came into play and didn't make us stop immediately Assert.assertEquals(NB_RETRIES + 1, ap.callsCt.get()); } + /** - * This test simulates multiple regions on 2 servers. We should have 2 multi requests and - * 2 threads: 1 per server, this whatever the number of regions. + * This test simulates multiple regions on 2 servers. We should have 2 multi requests and 2 + * threads: 1 per server, this whatever the number of regions. */ @Test public void testThreadCreation() throws Exception { @@ -1422,8 +1403,8 @@ public void testThreadCreation() throws Exception { List hrls = new ArrayList<>(NB_REGS); List gets = new ArrayList<>(NB_REGS); for (int i = 0; i < NB_REGS; i++) { - HRegionInfo hri = new HRegionInfo( - DUMMY_TABLE, Bytes.toBytes(i * 10L), Bytes.toBytes(i * 10L + 9L), false, i); + HRegionInfo hri = new HRegionInfo(DUMMY_TABLE, Bytes.toBytes(i * 10L), + Bytes.toBytes(i * 10L + 9L), false, i); HRegionLocation hrl = new HRegionLocation(hri, i % 2 == 0 ? sn : sn2); hrls.add(hrl); @@ -1442,7 +1423,7 @@ public void testThreadCreation() throws Exception { Assert.assertEquals("1 thread per server", 2, con.nbThreads.get()); int nbReg = 0; - for (int i =0; i rows) - throws InterruptedIOException { - return submit(AsyncProcessTask.newBuilder() - .setPool(service) - .setTableName(tableName) - .setRowAccess(rows) - .setSubmittedRows(AsyncProcessTask.SubmittedRows.NORMAL) - .setNeedResults(true) - .setRpcTimeout(HConstants.DEFAULT_HBASE_RPC_TIMEOUT) - .setOperationTimeout(HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT) - .build()); + throws InterruptedIOException { + return submit(AsyncProcessTask.newBuilder().setPool(service).setTableName(tableName) + .setRowAccess(rows).setSubmittedRows(AsyncProcessTask.SubmittedRows.NORMAL) + .setNeedResults(true).setRpcTimeout(HConstants.DEFAULT_HBASE_RPC_TIMEOUT) + .setOperationTimeout(HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT).build()); } @Override - protected RpcRetryingCaller createCaller( - CancellableRegionServerCallable callable, int rpcTimeout) { + protected RpcRetryingCaller + createCaller(CancellableRegionServerCallable callable, int rpcTimeout) { MultiServerCallable callable1 = (MultiServerCallable) callable; MultiResponse mr = new MultiResponse(); callable1.getMulti().actions.forEach((regionName, actions) -> { @@ -236,7 +225,7 @@ protected RpcRetryingCaller createCaller( return new RpcRetryingCallerImpl(100, 500, 0, 9) { @Override public AbstractResponse callWithoutRetries(RetryingCallable callable, - int callTimeout) { + int callTimeout) { try { // sleep one second in order for threadpool to start another thread instead of reusing // existing one. 
diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java index 47e09f8113fe..5eac67b936a6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncRegionLocatorTracing.java @@ -30,6 +30,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.hasItem; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -60,6 +61,7 @@ import org.junit.experimental.categories.Category; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, MediumTests.class }) @@ -68,7 +70,7 @@ public class TestAsyncRegionLocatorTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncRegionLocatorTracing.class); + HBaseClassTestRule.forClass(TestAsyncRegionLocatorTracing.class); private static final Configuration CONF = HBaseConfiguration.create(); @@ -83,12 +85,12 @@ public class TestAsyncRegionLocatorTracing { public void setUp() throws IOException { RegionInfo metaRegionInfo = RegionInfoBuilder.newBuilder(TableName.META_TABLE_NAME).build(); locs = new RegionLocations( - new HRegionLocation(metaRegionInfo, - ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis())), - new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 1), - ServerName.valueOf("127.0.0.2", 12345, System.currentTimeMillis())), - new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2), - ServerName.valueOf("127.0.0.3", 12345, System.currentTimeMillis()))); + new HRegionLocation(metaRegionInfo, + ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis())), + new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 1), + ServerName.valueOf("127.0.0.2", 12345, System.currentTimeMillis())), + new HRegionLocation(RegionReplicaUtil.getRegionInfoForReplica(metaRegionInfo, 2), + ServerName.valueOf("127.0.0.3", 12345, System.currentTimeMillis()))); conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF) { @Override @@ -110,28 +112,22 @@ private SpanData waitSpan(String name) { private SpanData waitSpan(Matcher matcher) { Matcher spanLocator = allOf(matcher, hasEnded()); try { - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span", - () -> traceRule.getSpans(), hasItem(spanLocator))); + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span", + () -> traceRule.getSpans(), hasItem(spanLocator))); } catch (AssertionError e) { LOG.error("AssertionError while waiting for matching span. 
Span reservoir contains: {}", traceRule.getSpans()); throw e; } - return traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .findFirst() - .orElseThrow(AssertionError::new); + return traceRule.getSpans().stream().filter(spanLocator::matches).findFirst() + .orElseThrow(AssertionError::new); } @Test public void testClearCache() { conn.getLocator().clearCache(); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn))); } @@ -140,22 +136,20 @@ public void testClearCacheServerName() { ServerName sn = ServerName.valueOf("127.0.0.1", 12345, System.currentTimeMillis()); conn.getLocator().clearCache(sn); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + hasAttributes(containsEntry("db.hbase.server.name", sn.getServerName())))); } @Test public void testClearCacheTableName() { conn.getLocator().clearCache(TableName.META_TABLE_NAME); SpanData span = waitSpan("AsyncRegionLocator.clearCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME))); } @Test @@ -163,13 +157,11 @@ public void testGetRegionLocation() { conn.getLocator().getRegionLocation(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocation"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( - containsEntryWithStringValuesOf("db.hbase.regions", + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME), + hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", locs.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @@ -178,16 +170,12 @@ public void testGetRegionLocations() { conn.getLocator().getRegionLocations(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW, RegionLocateType.CURRENT, false, TimeUnit.SECONDS.toNanos(1)).join(); SpanData span = waitSpan("AsyncRegionLocator.getRegionLocations"); - String[] expectedRegions = Arrays.stream(locs.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(locs.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), 
buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java index 1e6a4b345e76..992db0cc719f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,6 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.Cell.Type; @@ -68,7 +67,9 @@ import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -84,7 +85,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse; - /** * Confirm that we will set the priority in {@link HBaseRpcController} for several table operations. 
*/ @@ -93,7 +93,7 @@ public class TestAsyncTableRpcPriority { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableRpcPriority.class); + HBaseClassTestRule.forClass(TestAsyncTableRpcPriority.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -115,8 +115,7 @@ public void setUp() throws IOException { @Override public Void answer(InvocationOnMock invocation) throws Throwable { - ClientProtos.MultiResponse resp = - ClientProtos.MultiResponse.newBuilder() + ClientProtos.MultiResponse resp = ClientProtos.MultiResponse.newBuilder() .addRegionActionResult(RegionActionResult.newBuilder().addResultOrException( ResultOrException.newBuilder().setResult(ProtobufUtil.toResult(new Result())))) .build(); @@ -136,11 +135,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) - .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; default: resp = MutateResponse.getDefaultInstance(); @@ -161,24 +160,24 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", - UserProvider.instantiate(CONF).getCurrent()) { + UserProvider.instantiate(CONF).getCurrent()) { @Override AsyncRegionLocator getLocator() { AsyncRegionLocator locator = mock(AsyncRegionLocator.class); Answer> answer = - new Answer>() { - - @Override - public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; + new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), any(RegionLocateType.class), anyLong()); doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), @@ -216,7 +215,7 @@ public boolean matches(ScanRequest request) { @Test public void testGet() { conn.getTable(TableName.valueOf(name.getMethodName())) - .get(new Get(Bytes.toBytes(0)).setPriority(11)).join(); + .get(new Get(Bytes.toBytes(0)).setPriority(11)).join(); verify(stub, times(1)).get(assertPriority(11), 
any(GetRequest.class), any()); } @@ -229,7 +228,7 @@ public void testGetNormalTable() { @Test public void testGetSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .get(new Get(Bytes.toBytes(0))).join(); + .get(new Get(Bytes.toBytes(0))).join(); verify(stub, times(1)).get(assertPriority(SYSTEMTABLE_QOS), any(GetRequest.class), any()); } @@ -241,54 +240,53 @@ public void testGetMetaTable() { @Test public void testPut() { - conn - .getTable(TableName.valueOf(name.getMethodName())).put(new Put(Bytes.toBytes(0)) + conn.getTable(TableName.valueOf(name.getMethodName())).put(new Put(Bytes.toBytes(0)) .setPriority(12).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .join(); verify(stub, times(1)).mutate(assertPriority(12), any(MutateRequest.class), any()); } @Test public void testPutNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())).put(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testPutSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .put(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), - Bytes.toBytes("v"))) - .join(); + .put(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testPutMetaTable() { conn.getTable(TableName.META_TABLE_NAME).put(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testDelete() { conn.getTable(TableName.valueOf(name.getMethodName())) - .delete(new Delete(Bytes.toBytes(0)).setPriority(13)).join(); + .delete(new Delete(Bytes.toBytes(0)).setPriority(13)).join(); verify(stub, times(1)).mutate(assertPriority(13), any(MutateRequest.class), any()); } @Test public void testDeleteNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())).delete(new Delete(Bytes.toBytes(0))) - .join(); + .join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testDeleteSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .delete(new Delete(Bytes.toBytes(0))).join(); + .delete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @@ -300,154 +298,155 @@ public void testDeleteMetaTable() { @Test public void testAppend() { - conn - .getTable(TableName.valueOf(name.getMethodName())).append(new Append(Bytes.toBytes(0)) + conn.getTable(TableName.valueOf(name.getMethodName())).append(new Append(Bytes.toBytes(0)) .setPriority(14).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .join(); verify(stub, times(1)).mutate(assertPriority(14), any(MutateRequest.class), any()); } @Test public void testAppendNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())).append(new 
Append(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testAppendSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .append(new Append(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), - Bytes.toBytes("v"))) - .join(); + .append(new Append(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testAppendMetaTable() { conn.getTable(TableName.META_TABLE_NAME).append(new Append(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testIncrement() { conn.getTable(TableName.valueOf(name.getMethodName())).increment(new Increment(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).setPriority(15)).join(); + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).setPriority(15)).join(); verify(stub, times(1)).mutate(assertPriority(15), any(MutateRequest.class), any()); } @Test public void testIncrementNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); + .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testIncrementSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); + .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testIncrementMetaTable() { conn.getTable(TableName.META_TABLE_NAME) - .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); + .incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndPut() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifNotExists() - .thenPut(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")).setPriority(16)) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifNotExists() + .thenPut(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .setPriority(16)) + .join(); verify(stub, times(1)).mutate(assertPriority(16), any(MutateRequest.class), any()); } @Test public void testCheckAndPutNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), 
Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndPutSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifNotExists().thenPut(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndPutMetaTable() { conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndDelete() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0)).setPriority(17)).join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0)).setPriority(17)) + .join(); verify(stub, times(1)).mutate(assertPriority(17), any(MutateRequest.class), any()); } @Test public void testCheckAndDeleteNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(NORMAL_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndDeleteSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenDelete(new Delete(Bytes.toBytes(0))).join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndDeleteMetaTable() { 
conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .join(); + .qualifier(Bytes.toBytes("cq")).ifNotExists().thenPut(new Put(Bytes.toBytes(0)) + .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .join(); verify(stub, times(1)).mutate(assertPriority(SYSTEMTABLE_QOS), any(MutateRequest.class), any()); } @Test public void testCheckAndMutate() throws IOException { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")).thenMutate(new RowMutations(Bytes.toBytes(0)) - .add((Mutation) new Delete(Bytes.toBytes(0)).setPriority(18))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")).thenMutate(new RowMutations(Bytes.toBytes(0)) + .add((Mutation) new Delete(Bytes.toBytes(0)).setPriority(18))) + .join(); verify(stub, times(1)).multi(assertPriority(18), any(ClientProtos.MultiRequest.class), any()); } @Test public void testCheckAndMutateNormalTable() throws IOException { conn.getTable(TableName.valueOf(name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")) - .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")) + .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) + .join(); verify(stub, times(1)).multi(assertPriority(NORMAL_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -455,10 +454,10 @@ public void testCheckAndMutateNormalTable() throws IOException { @Test public void testCheckAndMutateSystemTable() throws IOException { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")) - .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) - .join(); + .checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")) + .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) + .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -466,9 +465,9 @@ public void testCheckAndMutateSystemTable() throws IOException { @Test public void testCheckAndMutateMetaTable() throws IOException { conn.getTable(TableName.META_TABLE_NAME).checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v")) - .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) - .join(); + .qualifier(Bytes.toBytes("cq")).ifEquals(Bytes.toBytes("v")) + .thenMutate(new RowMutations(Bytes.toBytes(0)).add((Mutation) new Delete(Bytes.toBytes(0)))) + .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -486,26 +485,24 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ScanRequest req = invocation.getArgument(1); 
RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { - done.run( - ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasRenew() && req.getRenew()) { future.complete(null); } assertFalse("close scanner should not come in with scan priority " + scanPriority, - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Type.Put) + .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); - done.run( - ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true) - .addResults(ProtobufUtil.toResult(result)).build()); + done.run(ScanResponse.newBuilder().setScannerId(scannerId).setTtl(800) + .setMoreResultsInRegion(true).setMoreResults(true) + .addResults(ProtobufUtil.toResult(result)).build()); } }); return null; @@ -521,10 +518,9 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ScanRequest req = invocation.getArgument(1); RpcCallback done = invocation.getArgument(2); assertTrue("close request should have scannerId", req.hasScannerId()); - assertEquals("close request's scannerId should match", scannerId, - req.getScannerId()); + assertEquals("close request's scannerId should match", scannerId, req.getScannerId()); assertTrue("close request should have closerScanner set", - req.hasCloseScanner() && req.getCloseScanner()); + req.hasCloseScanner() && req.getCloseScanner()); done.run(ScanResponse.getDefaultInstance()); }); @@ -549,8 +545,8 @@ public void testScanNormalTable() throws Exception { @Test public void testScanSystemTable() throws Exception { CompletableFuture renewFuture = mockScanReturnRenewFuture(SYSTEMTABLE_QOS); - testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), - renewFuture, Optional.empty()); + testForTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName()), renewFuture, + Optional.empty()); } @Test @@ -560,7 +556,7 @@ public void testScanMetaTable() throws Exception { } private void testForTable(TableName tableName, CompletableFuture renewFuture, - Optional priority) throws Exception { + Optional priority) throws Exception { Scan scan = new Scan().setCaching(1).setMaxResultSize(1); priority.ifPresent(scan::setPriority); @@ -584,7 +580,7 @@ private void testForTable(TableName tableName, CompletableFuture renewFutu @Test public void testBatchNormalTable() { conn.getTable(TableName.valueOf(name.getMethodName())) - .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); + .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); verify(stub, times(1)).multi(assertPriority(NORMAL_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -592,7 +588,7 @@ public void testBatchNormalTable() { @Test public void testBatchSystemTable() { conn.getTable(TableName.valueOf(SYSTEM_NAMESPACE_NAME_STR, name.getMethodName())) - .batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); + 
.batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } @@ -600,7 +596,7 @@ public void testBatchSystemTable() { @Test public void testBatchMetaTable() { conn.getTable(TableName.META_TABLE_NAME).batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))) - .join(); + .join(); verify(stub, times(1)).multi(assertPriority(SYSTEMTABLE_QOS), any(ClientProtos.MultiRequest.class), any()); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java index 04df972d7a1a..51e7c1b755b9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncTableTracing.java @@ -36,6 +36,7 @@ import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -75,8 +76,10 @@ import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; import org.apache.hbase.thirdparty.com.google.protobuf.RpcCallback; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -97,7 +100,7 @@ public class TestAsyncTableTracing { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestAsyncTableTracing.class); + HBaseClassTestRule.forClass(TestAsyncTableTracing.class); private static Configuration CONF = HBaseConfiguration.create(); @@ -122,18 +125,18 @@ public Void answer(InvocationOnMock invocation) throws Throwable { RpcCallback done = invocation.getArgument(2); if (!req.hasScannerId()) { done.run(ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build()); + .setMoreResultsInRegion(true).setMoreResults(true).build()); } else { if (req.hasCloseScanner() && req.getCloseScanner()) { done.run(ScanResponse.getDefaultInstance()); } else { Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")).build(); + .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); ScanResponse.Builder builder = ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .addResults(ProtobufUtil.toResult(result)); + .addResults(ProtobufUtil.toResult(result)); if (req.getLimitOfRows() == 1) { builder.setMoreResultsInRegion(false).setMoreResults(false); } else { @@ -175,13 +178,13 @@ public Void answer(InvocationOnMock invocation) throws Throwable { case INCREMENT: ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - 
.setType(Cell.Type.Put).setRow(req.getRow().toByteArray()) - .setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()).build(); + Cell cell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; default: resp = MutateResponse.getDefaultInstance(); @@ -202,25 +205,24 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class), any()); final User user = UserProvider.instantiate(CONF).getCurrent(); - conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", - user) { + conn = new AsyncConnectionImpl(CONF, new DoNothingConnectionRegistry(CONF), "test", user) { @Override AsyncRegionLocator getLocator() { AsyncRegionLocator locator = mock(AsyncRegionLocator.class); Answer> answer = - new Answer>() { - - @Override - public CompletableFuture answer(InvocationOnMock invocation) - throws Throwable { - TableName tableName = invocation.getArgument(0); - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = ServerName.valueOf("rs", 16010, 12345); - HRegionLocation loc = new HRegionLocation(info, serverName); - return CompletableFuture.completedFuture(loc); - } - }; + new Answer>() { + + @Override + public CompletableFuture answer(InvocationOnMock invocation) + throws Throwable { + TableName tableName = invocation.getArgument(0); + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = ServerName.valueOf("rs", 16010, 12345); + HRegionLocation loc = new HRegionLocation(info, serverName); + return CompletableFuture.completedFuture(loc); + } + }; doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), any(RegionLocateType.class), anyLong()); doAnswer(answer).when(locator).getRegionLocation(any(TableName.class), any(byte[].class), @@ -249,26 +251,19 @@ private void assertTrace(String tableOperation, Matcher matcher) { // n.b. 
this method implementation must match the one of the same name found in // TestHTableTracing final TableName tableName = table.getName(); - final Matcher spanLocator = allOf( - hasName(containsString(tableOperation)), hasEnded()); + final Matcher spanLocator = + allOf(hasName(containsString(tableOperation)), hasEnded()); final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString(); - Waiter.waitFor(CONF, 1000, new MatcherPredicate<>( - "waiting for span to emit", - () -> traceRule.getSpans(), hasItem(spanLocator))); - List candidateSpans = traceRule.getSpans() - .stream() - .filter(spanLocator::matches) - .collect(Collectors.toList()); + Waiter.waitFor(CONF, 1000, new MatcherPredicate<>("waiting for span to emit", + () -> traceRule.getSpans(), hasItem(spanLocator))); + List candidateSpans = + traceRule.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); assertThat(candidateSpans, hasSize(1)); SpanData data = candidateSpans.iterator().next(); - assertThat(data, allOf( - hasName(expectedName), - hasKind(SpanKind.CLIENT), - hasStatusWithCode(StatusCode.OK), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(tableName), - matcher)); + assertThat(data, + allOf(hasName(expectedName), hasKind(SpanKind.CLIENT), hasStatusWithCode(StatusCode.OK), + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(tableName), matcher)); } @Test @@ -306,16 +301,16 @@ public void testAppend() { @Test public void testIncrement() { table - .increment( - new Increment(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1)) - .join(); + .increment( + new Increment(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1)) + .join(); assertTrace("INCREMENT"); } @Test public void testIncrementColumnValue1() { table.incrementColumnValue(Bytes.toBytes(0), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1) - .join(); + .join(); assertTrace("INCREMENT"); } @@ -329,38 +324,37 @@ public void testIncrementColumnValue2() { @Test public void testCheckAndMutate() { table.checkAndMutate(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0)))).join(); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0)))).join(); assertTrace("CHECK_AND_MUTATE"); } @Test public void testCheckAndMutateList() { CompletableFuture - .allOf(table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .allOf(table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test public void testCheckAndMutateAll() { table.checkAndMutateAll(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))).join(); - assertTrace("BATCH", 
hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))).join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } private void testCheckAndMutateBuilder(Row op) { AsyncTable.CheckAndMutateBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")) - .qualifier(Bytes.toBytes("cq")) - .ifEquals(Bytes.toBytes("v")); + table.checkAndMutate(Bytes.toBytes(0), Bytes.toBytes("cf")).qualifier(Bytes.toBytes("cq")) + .ifEquals(Bytes.toBytes("v")); if (op instanceof Put) { Put put = (Put) op; builder.thenPut(put).join(); @@ -378,8 +372,8 @@ private void testCheckAndMutateBuilder(Row op) { @Test public void testCheckAndMutateBuilderThenPut() { - Put put = new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")); + Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")); testCheckAndMutateBuilder(put); } @@ -390,17 +384,18 @@ public void testCheckAndMutateBuilderThenDelete() { @Test public void testCheckAndMutateBuilderThenMutations() throws IOException { - RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add((Mutation) (new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")))) - .add((Mutation) new Delete(Bytes.toBytes(0))); + RowMutations mutations = + new RowMutations(Bytes.toBytes(0)) + .add((Mutation) (new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), + Bytes.toBytes("cq"), Bytes.toBytes("v")))) + .add((Mutation) new Delete(Bytes.toBytes(0))); testCheckAndMutateBuilder(mutations); } private void testCheckAndMutateWithFilterBuilder(Row op) { // use of `PrefixFilter` is completely arbitrary here. 
AsyncTable.CheckAndMutateWithFilterBuilder builder = - table.checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0))); + table.checkAndMutate(Bytes.toBytes(0), new PrefixFilter(Bytes.toBytes(0))); if (op instanceof Put) { Put put = (Put) op; builder.thenPut(put).join(); @@ -418,8 +413,8 @@ private void testCheckAndMutateWithFilterBuilder(Row op) { @Test public void testCheckAndMutateWithFilterBuilderThenPut() { - Put put = new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v")); + Put put = new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), + Bytes.toBytes("v")); testCheckAndMutateWithFilterBuilder(put); } @@ -430,19 +425,21 @@ public void testCheckAndMutateWithFilterBuilderThenDelete() { @Test public void testCheckAndMutateWithFilterBuilderThenMutations() throws IOException { - RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add((Mutation) new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("f"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add((Mutation) new Delete(Bytes.toBytes(0))); + RowMutations mutations = + new RowMutations(Bytes.toBytes(0)) + .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("f"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .add((Mutation) new Delete(Bytes.toBytes(0))); testCheckAndMutateWithFilterBuilder(mutations); } @Test public void testMutateRow() throws IOException { - final RowMutations mutations = new RowMutations(Bytes.toBytes(0)) - .add((Mutation) new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))) - .add((Mutation) new Delete(Bytes.toBytes(0))); + final RowMutations mutations = + new RowMutations(Bytes.toBytes(0)) + .add((Mutation) new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v"))) + .add((Mutation) new Delete(Bytes.toBytes(0))); table.mutateRow(mutations).join(); assertTrace("BATCH", hasAttributes( containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE", "PUT"))); @@ -457,85 +454,84 @@ public void testScanAll() { @Test public void testExistsList() { CompletableFuture - .allOf( - table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + .allOf( + table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testExistsAll() { table.existsAll(Arrays.asList(new Get(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetList() { CompletableFuture - .allOf(table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + .allOf( + table.get(Arrays.asList(new Get(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetAll() { table.getAll(Arrays.asList(new 
Get(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testPutList() { CompletableFuture - .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), - Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); + .allOf(table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), + Bytes.toBytes("cq"), Bytes.toBytes("v")))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testPutAll() { table.putAll(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testDeleteList() { - CompletableFuture - .allOf( - table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + CompletableFuture.allOf( + table.delete(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testDeleteAll() { table.deleteAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatch() { - CompletableFuture - .allOf( - table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) - .join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + CompletableFuture.allOf( + table.batch(Arrays.asList(new Delete(Bytes.toBytes(0)))).toArray(new CompletableFuture[0])) + .join(); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatchAll() { table.batchAll(Arrays.asList(new Delete(Bytes.toBytes(0)))).join(); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java index 15d5104730a4..a962cf7c91f2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more 
contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,13 +27,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestAttributes { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestAttributes.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; + @Test public void testPutAttributes() { Put put = new Put(ROW); @@ -48,22 +49,22 @@ public void testPutAttributes() { put.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - put.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), put.getAttributesMap().get("attribute1"))); // overriding attribute value put.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), put.getAttribute("attribute1"))); Assert.assertEquals(1, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - put.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), put.getAttributesMap().get("attribute1"))); // adding another attribute put.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), put.getAttribute("attribute2"))); Assert.assertEquals(2, put.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - put.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), put.getAttributesMap().get("attribute2"))); // removing attribute put.setAttribute("attribute2", null); @@ -86,7 +87,7 @@ public void testPutAttributes() { @Test public void testDeleteAttributes() { - Delete del = new Delete(new byte [] {'r'}); + Delete del = new Delete(new byte[] { 'r' }); Assert.assertTrue(del.getAttributesMap().isEmpty()); Assert.assertNull(del.getAttribute("absent")); @@ -98,22 +99,22 @@ public void testDeleteAttributes() { del.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - del.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), del.getAttributesMap().get("attribute1"))); // overriding attribute value del.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), del.getAttribute("attribute1"))); Assert.assertEquals(1, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - del.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), del.getAttributesMap().get("attribute1"))); // adding another attribute del.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), del.getAttribute("attribute2"))); Assert.assertEquals(2, del.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - 
del.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), del.getAttributesMap().get("attribute2"))); // removing attribute del.setAttribute("attribute2", null); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java index c9993ee2dd1f..7ab6ad3c2064 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestBufferedMutator { @ClassRule @@ -45,8 +45,7 @@ public class TestBufferedMutator { public TestName name = new TestName(); /** - * My BufferedMutator. - * Just to prove that I can insert a BM other than default. + * My BufferedMutator. Just to prove that I can insert a BM other than default. */ public static class MyBufferedMutator extends BufferedMutatorImpl { MyBufferedMutator(ClusterConnection conn, RpcRetryingCallerFactory rpcCallerFactory, @@ -57,10 +56,11 @@ public static class MyBufferedMutator extends BufferedMutatorImpl { @Test public void testAlternateBufferedMutatorImpl() throws IOException { - BufferedMutatorParams params = new BufferedMutatorParams(TableName.valueOf(name.getMethodName())); + BufferedMutatorParams params = + new BufferedMutatorParams(TableName.valueOf(name.getMethodName())); Configuration conf = HBaseConfiguration.create(); conf.set(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - DoNothingConnectionRegistry.class.getName()); + DoNothingConnectionRegistry.class.getName()); try (Connection connection = ConnectionFactory.createConnection(conf)) { BufferedMutator bm = connection.getBufferedMutator(params); // Assert we get default BM if nothing specified. diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java index 73953d0db75e..f30b14e0b0ba 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestBufferedMutatorParams.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -98,15 +98,14 @@ public Future submit(Runnable task) { } @Override - public List> invokeAll( - Collection> tasks) throws InterruptedException { + public List> invokeAll(Collection> tasks) + throws InterruptedException { return null; } @Override - public List> invokeAll( - Collection> tasks, long timeout, TimeUnit unit) - throws InterruptedException { + public List> invokeAll(Collection> tasks, long timeout, + TimeUnit unit) throws InterruptedException { return null; } @@ -117,8 +116,7 @@ public T invokeAny(Collection> tasks) } @Override - public T invokeAny(Collection> tasks, - long timeout, TimeUnit unit) + public T invokeAny(Collection> tasks, long timeout, TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { return null; } @@ -129,8 +127,8 @@ public T invokeAny(Collection> tasks, */ private static class MockExceptionListener implements BufferedMutator.ExceptionListener { @Override - public void onException(RetriesExhaustedWithDetailsException exception, - BufferedMutator mutator) throws RetriesExhaustedWithDetailsException { + public void onException(RetriesExhaustedWithDetailsException exception, BufferedMutator mutator) + throws RetriesExhaustedWithDetailsException { } } @@ -141,13 +139,9 @@ public void testClone() { BufferedMutatorParams bmp = new BufferedMutatorParams(TableName.valueOf(tableName)); BufferedMutator.ExceptionListener listener = new MockExceptionListener(); - bmp - .writeBufferSize(17) - .setWriteBufferPeriodicFlushTimeoutMs(123) - .setWriteBufferPeriodicFlushTimerTickMs(456) - .maxKeyValueSize(13) - .pool(pool) - .listener(listener); + bmp.writeBufferSize(17).setWriteBufferPeriodicFlushTimeoutMs(123) + .setWriteBufferPeriodicFlushTimerTickMs(456).maxKeyValueSize(13).pool(pool) + .listener(listener); bmp.implementationClassName("someClassName"); BufferedMutatorParams clone = bmp.clone(); @@ -175,16 +169,14 @@ public void testClone() { * @param some some instance * @param clone a clone of that instance, but not the same instance. 
*/ - private void cloneTest(BufferedMutatorParams some, - BufferedMutatorParams clone) { + private void cloneTest(BufferedMutatorParams some, BufferedMutatorParams clone) { assertFalse(some == clone); - assertEquals(some.getTableName().toString(), - clone.getTableName().toString()); + assertEquals(some.getTableName().toString(), clone.getTableName().toString()); assertEquals(some.getWriteBufferSize(), clone.getWriteBufferSize()); assertEquals(some.getWriteBufferPeriodicFlushTimeoutMs(), - clone.getWriteBufferPeriodicFlushTimeoutMs()); + clone.getWriteBufferPeriodicFlushTimeoutMs()); assertEquals(some.getWriteBufferPeriodicFlushTimerTickMs(), - clone.getWriteBufferPeriodicFlushTimerTickMs()); + clone.getWriteBufferPeriodicFlushTimerTickMs()); assertEquals(some.getMaxKeyValueSize(), clone.getMaxKeyValueSize()); assertTrue(some.getListener() == clone.getListener()); assertTrue(some.getPool() == clone.getPool()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java index 0df04b8043f8..b2e9abfc2c5e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientExponentialBackoff.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestClientExponentialBackoff { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -67,8 +67,8 @@ public void testMaxLoad() { ServerStatistics stats = new ServerStatistics(); update(stats, 100); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); // another policy with a different max timeout long max = 100; @@ -78,20 +78,20 @@ public void testMaxLoad() { // test beyond 100 still doesn't exceed the max update(stats, 101); - assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, backoff.getBackoffTime(server, - regionname, stats)); + assertEquals(ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF, + backoff.getBackoffTime(server, regionname, stats)); assertEquals(max, backoffShortTimeout.getBackoffTime(server, regionname, stats)); // and that when we are below 100, its less than the max timeout update(stats, 99); - assertTrue(backoff.getBackoffTime(server, - regionname, stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); + assertTrue(backoff.getBackoffTime(server, regionname, + stats) < ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF); assertTrue(backoffShortTimeout.getBackoffTime(server, regionname, stats) < max); } /** - * Make sure that we get results in the order that we expect - backoff for a load of 1 should - * less than backoff for 10, which should be less than that for 50. + * Make sure that we get results in the order that we expect - backoff for a load of 1 should less + * than backoff for 10, which should be less than that for 50. 
*/ @Test public void testResultOrdering() { @@ -105,9 +105,9 @@ public void testResultOrdering() { for (int i = 1; i <= 100; i++) { update(stats, i); long next = backoff.getBackoffTime(server, regionname, stats); - assertTrue( - "Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + - "load " + i, previous < next); + assertTrue("Previous backoff time" + previous + " >= " + next + ", the next backoff time for " + + "load " + i, + previous < next); previous = next; } } @@ -151,8 +151,7 @@ public void testCompactionPressurePolicy() { long previous = backoffTime; update(stats, 0, 0, 50); backoffTime = backoff.getBackoffTime(server, regionname, stats); - assertTrue("Compaction pressure should be bigger", - backoffTime > previous); + assertTrue("Compaction pressure should be bigger", backoffTime > previous); update(stats, 0, 0, 100); backoffTime = backoff.getBackoffTime(server, regionname, stats); @@ -161,18 +160,16 @@ public void testCompactionPressurePolicy() { } private void update(ServerStatistics stats, int load) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(load).build(); + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(load).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy, - int compactionPressure) { - ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder() - .setMemStoreLoad(memstoreLoad) - .setHeapOccupancy(heapOccupancy) - .setCompactionPressure(compactionPressure) - .build(); + int compactionPressure) { + ClientProtos.RegionLoadStats stat = + ClientProtos.RegionLoadStats.newBuilder().setMemStoreLoad(memstoreLoad) + .setHeapOccupancy(heapOccupancy).setCompactionPressure(compactionPressure).build(); stats.update(regionname, ProtobufUtil.createRegionLoadStats(stat)); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java index 6c987322cf17..c700d1b7dd9d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientNoCluster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -104,11 +104,10 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; /** - * Test client behavior w/o setting up a cluster. - * Mock up cluster emissions. - * See below for a method that tests retries/timeouts currently commented out. + * Test client behavior w/o setting up a cluster. Mock up cluster emissions. See below for a method + * that tests retries/timeouts currently commented out. */ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestClientNoCluster extends Configured implements Tool { @ClassRule @@ -118,10 +117,10 @@ public class TestClientNoCluster extends Configured implements Tool { private static final Logger LOG = LoggerFactory.getLogger(TestClientNoCluster.class); private Configuration conf; /** - * A server that does not exist. 
I've changed the server in the below to 'localhost' so we - * have a servername that resolves -- otherwise, we just fail on server name lookup with - * UnknownHost... With localhost, was able to reproduce stack traces that looked like production - * stack traces. Was useful figuring out how retry/timeouts are functioning. + * A server that does not exist. I've changed the server in the below to 'localhost' so we have a + * servername that resolves -- otherwise, we just fail on server name lookup with UnknownHost... + * With localhost, was able to reproduce stack traces that looked like production stack traces. + * Was useful figuring out how retry/timeouts are functioning. */ public static final ServerName META_SERVERNAME = ServerName.valueOf("meta.example.org", 16010, 12345); @@ -129,7 +128,7 @@ public class TestClientNoCluster extends Configured implements Tool { @Before public void setUp() throws Exception { this.conf = HBaseConfiguration.create(); - // Run my Connection overrides. Use my little ConnectionImplementation below which + // Run my Connection overrides. Use my little ConnectionImplementation below which // allows me insert mocks and also use my Registry below rather than the default zk based // one so tests run faster and don't have zk dependency. this.conf.set("hbase.client.registry.impl", SimpleRegistry.class.getName()); @@ -179,7 +178,7 @@ public void testTimeoutAndRetries() throws IOException { LOG.info("Got expected exception", e); t = e; } catch (RetriesExhaustedException e) { - // This is the old, unwanted behavior. If we get here FAIL!!! + // This is the old, unwanted behavior. If we get here FAIL!!! fail(); } finally { table.close(); @@ -229,9 +228,9 @@ public void testRpcTimeout() throws IOException { int pause = 10; localConfig.setInt("hbase.client.pause", pause); localConfig.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 10); - // Set the operation timeout to be < the pause. Expectation is that after first pause, we will + // Set the operation timeout to be < the pause. Expectation is that after first pause, we will // fail out of the rpc because the rpc timeout will have been set to the operation tiemout - // and it has expired. Otherwise, if this functionality is broke, all retries will be run -- + // and it has expired. Otherwise, if this functionality is broke, all retries will be run -- // all ten of them -- and we'll get the RetriesExhaustedException exception. localConfig.setInt(HConstants.HBASE_CLIENT_META_OPERATION_TIMEOUT, pause - 1); Connection connection = ConnectionFactory.createConnection(localConfig); @@ -245,7 +244,7 @@ public void testRpcTimeout() throws IOException { LOG.info("Got expected exception", e); t = e; } catch (RetriesExhaustedException e) { - // This is the old, unwanted behavior. If we get here FAIL!!! + // This is the old, unwanted behavior. If we get here FAIL!!! fail(); } finally { table.close(); @@ -268,7 +267,7 @@ public void testDoNotRetryOnScanNext() throws IOException { this.conf.set("hbase.client.connection.impl", RegionServerStoppedOnScannerOpenConnection.class.getName()); // Go against meta else we will try to find first region for the table on construction which - // means we'll have to do a bunch more mocking. Tests that go against meta only should be + // means we'll have to do a bunch more mocking. Tests that go against meta only should be // good for a bit of testing. 
Connection connection = ConnectionFactory.createConnection(this.conf); Table table = connection.getTable(TableName.META_TABLE_NAME); @@ -290,7 +289,7 @@ public void testRegionServerStoppedOnScannerOpen() throws IOException { this.conf.set("hbase.client.connection.impl", RegionServerStoppedOnScannerOpenConnection.class.getName()); // Go against meta else we will try to find first region for the table on construction which - // means we'll have to do a bunch more mocking. Tests that go against meta only should be + // means we'll have to do a bunch more mocking. Tests that go against meta only should be // good for a bit of testing. Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(TableName.META_TABLE_NAME); @@ -336,24 +335,24 @@ public void testConnectionClosedOnRegionLocate() throws IOException { /** * Override to shutdown going to zookeeper for cluster id and meta location. */ - static class RegionServerStoppedOnScannerOpenConnection - extends ConnectionImplementation { + static class RegionServerStoppedOnScannerOpenConnection extends ConnectionImplementation { final ClientService.BlockingInterface stub; - RegionServerStoppedOnScannerOpenConnection(Configuration conf, - ExecutorService pool, User user) throws IOException { + RegionServerStoppedOnScannerOpenConnection(Configuration conf, ExecutorService pool, User user) + throws IOException { super(conf, pool, user); // Mock up my stub so open scanner returns a scanner id and then on next, we throw // exceptions for three times and then after that, we return no more to scan. this.stub = Mockito.mock(ClientService.BlockingInterface.class); long sid = 12345L; try { - Mockito.when(stub.scan((RpcController)Mockito.any(), - (ClientProtos.ScanRequest)Mockito.any())). - thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid).build()). - thenThrow(new ServiceException(new RegionServerStoppedException("From Mockito"))). - thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid). - setMoreResults(false).build()); + Mockito + .when( + stub.scan((RpcController) Mockito.any(), (ClientProtos.ScanRequest) Mockito.any())) + .thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid).build()) + .thenThrow(new ServiceException(new RegionServerStoppedException("From Mockito"))) + .thenReturn(ClientProtos.ScanResponse.newBuilder().setScannerId(sid) + .setMoreResults(false).build()); } catch (ServiceException e) { throw new IOException(e); } @@ -371,15 +370,14 @@ public BlockingInterface getClient(ServerName sn) throws IOException { static class RpcTimeoutConnection extends ConnectionImplementation { final ClientService.BlockingInterface stub; - RpcTimeoutConnection(Configuration conf, ExecutorService pool, User user) - throws IOException { + RpcTimeoutConnection(Configuration conf, ExecutorService pool, User user) throws IOException { super(conf, pool, user); // Mock up my stub so an exists call -- which turns into a get -- throws an exception this.stub = Mockito.mock(ClientService.BlockingInterface.class); try { - Mockito.when(stub.get((RpcController)Mockito.any(), - (ClientProtos.GetRequest)Mockito.any())). 
- thenThrow(new ServiceException(new java.net.ConnectException("Connection refused"))); + Mockito + .when(stub.get((RpcController) Mockito.any(), (ClientProtos.GetRequest) Mockito.any())) + .thenThrow(new ServiceException(new java.net.ConnectException("Connection refused"))); } catch (ServiceException e) { throw new IOException(e); } @@ -404,29 +402,26 @@ static class RpcTimeoutAsyncConnection extends AsyncConnectionImpl { /** * Fake many regionservers and many regions on a connection implementation. */ - static class ManyServersManyRegionsConnection - extends ConnectionImplementation { + static class ManyServersManyRegionsConnection extends ConnectionImplementation { // All access should be synchronized final Map serversByClient; /** * Map of faked-up rows of a 'meta table'. */ - final SortedMap> meta; + final SortedMap> meta; final AtomicLong sequenceids = new AtomicLong(0); private final Configuration conf; - ManyServersManyRegionsConnection(Configuration conf, - ExecutorService pool, User user) - throws IOException { + ManyServersManyRegionsConnection(Configuration conf, ExecutorService pool, User user) + throws IOException { super(conf, pool, user); int serverCount = conf.getInt("hbase.test.servers", 10); this.serversByClient = new HashMap<>(serverCount); - this.meta = makeMeta(Bytes.toBytes( - conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE))), - conf.getInt("hbase.test.regions", 100), - conf.getLong("hbase.test.namespace.span", 1000), - serverCount); + this.meta = + makeMeta(Bytes.toBytes(conf.get("hbase.test.tablename", Bytes.toString(BIG_USER_TABLE))), + conf.getInt("hbase.test.regions", 100), conf.getLong("hbase.test.namespace.span", 1000), + serverCount); this.conf = conf; } @@ -445,18 +440,17 @@ public ClientService.BlockingInterface getClient(ServerName sn) throws IOExcepti } } - static MultiResponse doMultiResponse(final SortedMap> meta, + static MultiResponse doMultiResponse(final SortedMap> meta, final AtomicLong sequenceids, final MultiRequest request) { - // Make a response to match the request. Act like there were no failures. + // Make a response to match the request. Act like there were no failures. ClientProtos.MultiResponse.Builder builder = ClientProtos.MultiResponse.newBuilder(); // Per Region. - RegionActionResult.Builder regionActionResultBuilder = - RegionActionResult.newBuilder(); + RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder(); ResultOrException.Builder roeBuilder = ResultOrException.newBuilder(); - for (RegionAction regionAction: request.getRegionActionList()) { + for (RegionAction regionAction : request.getRegionActionList()) { regionActionResultBuilder.clear(); // Per Action in a Region. - for (ClientProtos.Action action: regionAction.getActionList()) { + for (ClientProtos.Action action : regionAction.getActionList()) { roeBuilder.clear(); // Return empty Result and proper index as result. roeBuilder.setResult(ClientProtos.Result.getDefaultInstance()); @@ -469,33 +463,31 @@ static MultiResponse doMultiResponse(final SortedMap> meta; + private final SortedMap> meta; private final AtomicLong sequenceids; private final long multiPause; private final int tooManyMultiRequests; - FakeServer(final Configuration c, final SortedMap> meta, + FakeServer(final Configuration c, final SortedMap> meta, final AtomicLong sequenceids) { this.meta = meta; this.sequenceids = sequenceids; - // Pause to simulate the server taking time applying the edits. 
This will drive up the + // Pause to simulate the server taking time applying the edits. This will drive up the // number of threads used over in client. this.multiPause = c.getLong("hbase.test.multi.pause.when.done", 0); this.tooManyMultiRequests = c.getInt("hbase.test.multi.too.many", 3); } @Override - public GetResponse get(RpcController controller, GetRequest request) - throws ServiceException { - boolean metaRegion = isMetaRegion(request.getRegion().getValue().toByteArray(), - request.getRegion().getType()); + public GetResponse get(RpcController controller, GetRequest request) throws ServiceException { + boolean metaRegion = + isMetaRegion(request.getRegion().getValue().toByteArray(), request.getRegion().getType()); if (!metaRegion) { return doGetResponse(request); } @@ -512,41 +504,39 @@ private GetResponse doGetResponse(GetRequest request) { } @Override - public MutateResponse mutate(RpcController controller, - MutateRequest request) throws ServiceException { + public MutateResponse mutate(RpcController controller, MutateRequest request) + throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override - public ScanResponse scan(RpcController controller, - ScanRequest request) throws ServiceException { + public ScanResponse scan(RpcController controller, ScanRequest request) + throws ServiceException { // Presume it is a scan of meta for now. Not all scans provide a region spec expecting - // the server to keep reference by scannerid. TODO. + // the server to keep reference by scannerid. TODO. return doMetaScanResponse(meta, sequenceids, request); } @Override - public BulkLoadHFileResponse bulkLoadHFile( - RpcController controller, BulkLoadHFileRequest request) - throws ServiceException { + public BulkLoadHFileResponse bulkLoadHFile(RpcController controller, + BulkLoadHFileRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override - public CoprocessorServiceResponse execService( - RpcController controller, CoprocessorServiceRequest request) - throws ServiceException { + public CoprocessorServiceResponse execService(RpcController controller, + CoprocessorServiceRequest request) throws ServiceException { throw new NotImplementedException(HConstants.NOT_IMPLEMENTED); } @Override public MultiResponse multi(RpcController controller, MultiRequest request) - throws ServiceException { + throws ServiceException { int concurrentInvocations = this.multiInvocationsCount.incrementAndGet(); try { if (concurrentInvocations >= tooManyMultiRequests) { - throw new ServiceException(new RegionTooBusyException("concurrentInvocations=" + - concurrentInvocations)); + throw new ServiceException( + new RegionTooBusyException("concurrentInvocations=" + concurrentInvocations)); } Threads.sleep(multiPause); return doMultiResponse(meta, sequenceids, request); @@ -574,15 +564,16 @@ public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller, } } - static ScanResponse doMetaScanResponse(final SortedMap> meta, - final AtomicLong sequenceids, final ScanRequest request) { + static ScanResponse doMetaScanResponse( + final SortedMap> meta, final AtomicLong sequenceids, + final ScanRequest request) { ScanResponse.Builder builder = ScanResponse.newBuilder(); int max = request.getNumberOfRows(); int count = 0; - Map> tail = - request.hasScan()? 
meta.tailMap(request.getScan().getStartRow().toByteArray()): meta; - ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); - for (Map.Entry> e: tail.entrySet()) { + Map> tail = + request.hasScan() ? meta.tailMap(request.getScan().getStartRow().toByteArray()) : meta; + ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); + for (Map.Entry> e : tail.entrySet()) { // Can be 0 on open of a scanner -- i.e. rpc to setup scannerid only. if (max <= 0) break; if (++count > max) break; @@ -598,12 +589,12 @@ static ScanResponse doMetaScanResponse(final SortedMap> meta, + static GetResponse doMetaGetResponse(final SortedMap> meta, final GetRequest request) { ClientProtos.Result.Builder resultBuilder = ClientProtos.Result.newBuilder(); ByteString row = request.getGet().getRow(); @@ -623,13 +614,14 @@ static GetResponse doMetaGetResponse(final SortedMap= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; @@ -695,12 +687,12 @@ static CellProtos.Cell getStartCode(final ByteString row) { * @param namespaceSpan * @return count regions */ - private static HRegionInfo [] makeHRegionInfos(final byte [] tableName, final int count, + private static HRegionInfo[] makeHRegionInfos(final byte[] tableName, final int count, final long namespaceSpan) { - byte [] startKey = HConstants.EMPTY_BYTE_ARRAY; - byte [] endKey = HConstants.EMPTY_BYTE_ARRAY; + byte[] startKey = HConstants.EMPTY_BYTE_ARRAY; + byte[] endKey = HConstants.EMPTY_BYTE_ARRAY; long interval = namespaceSpan / count; - HRegionInfo [] hris = new HRegionInfo[count]; + HRegionInfo[] hris = new HRegionInfo[count]; for (int i = 0; i < count; i++) { if (i == 0) { endKey = format(interval); @@ -718,8 +710,8 @@ static CellProtos.Cell getStartCode(final ByteString row) { * @param count * @return Return count servernames. */ - private static ServerName [] makeServerNames(final int count) { - ServerName [] sns = new ServerName[count]; + private static ServerName[] makeServerNames(final int count) { + ServerName[] sns = new ServerName[count]; for (int i = 0; i < count; i++) { sns[i] = ServerName.valueOf("" + i + ".example.org", 16010, i); } @@ -729,8 +721,9 @@ static CellProtos.Cell getStartCode(final ByteString row) { /** * Comparator for meta row keys. */ - private static class MetaRowsComparator implements Comparator { + private static class MetaRowsComparator implements Comparator { private final CellComparatorImpl delegate = MetaCellComparator.META_COMPARATOR; + @Override public int compare(byte[] left, byte[] right) { return delegate.compareRows(new KeyValue.KeyOnlyKeyValue(left), right, 0, right.length); @@ -742,16 +735,16 @@ public int compare(byte[] left, byte[] right) { * ServerName to return for this row. * @return Map with faked hbase:meta content in it. */ - static SortedMap> makeMeta(final byte [] tableName, + static SortedMap> makeMeta(final byte[] tableName, final int regionCount, final long namespaceSpan, final int serverCount) { // I need a comparator for meta rows so we sort properly. 
- SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta = - new ConcurrentSkipListMap<>(new MetaRowsComparator()); - HRegionInfo [] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan); - ServerName [] serverNames = makeServerNames(serverCount); + SortedMap<byte[], Pair<HRegionInfo, ServerName>> meta = + new ConcurrentSkipListMap<>(new MetaRowsComparator()); + HRegionInfo[] hris = makeHRegionInfos(tableName, regionCount, namespaceSpan); + ServerName[] serverNames = makeServerNames(serverCount); int per = regionCount / serverCount; int count = 0; - for (HRegionInfo hri: hris) { + for (HRegionInfo hri : hris) { Pair<HRegionInfo, ServerName> p = new Pair<>(hri, serverNames[count++ / per]); meta.put(hri.getRegionName(), p); } @@ -760,13 +753,13 @@ public int compare(byte[] left, byte[] right) { /** * Code for each 'client' to run. - * * @param id * @param c * @param sharedConnection * @throws IOException */ - static void cycle(int id, final Configuration c, final Connection sharedConnection) throws IOException { + static void cycle(int id, final Configuration c, final Connection sharedConnection) + throws IOException { long namespaceSpan = c.getLong("hbase.test.namespace.span", 1000000); long startTime = EnvironmentEdgeManager.currentTime(); final int printInterval = 100000; @@ -774,38 +767,40 @@ static void cycle(int id, final Configuration c, final Connection sharedConnecti boolean get = c.getBoolean("hbase.test.do.gets", false); TableName tableName = TableName.valueOf(BIG_USER_TABLE); if (get) { - try (Table table = sharedConnection.getTable(tableName)){ + try (Table table = sharedConnection.getTable(tableName)) { Stopwatch stopWatch = Stopwatch.createStarted(); for (int i = 0; i < namespaceSpan; i++) { - byte [] b = format(rd.nextLong()); + byte[] b = format(rd.nextLong()); Get g = new Get(b); table.get(g); if (i % printInterval == 0) { - LOG.info("Get " + printInterval + "/" + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); + LOG.info("Get " + printInterval + "/" + + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); stopWatch.reset(); stopWatch.start(); } } - LOG.info("Finished a cycle putting " + namespaceSpan + " in " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); + LOG.info("Finished a cycle putting " + namespaceSpan + " in " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); } } else { try (BufferedMutator mutator = sharedConnection.getBufferedMutator(tableName)) { Stopwatch stopWatch = Stopwatch.createStarted(); for (int i = 0; i < namespaceSpan; i++) { - byte [] b = format(rd.nextLong()); + byte[] b = format(rd.nextLong()); Put p = new Put(b); p.addColumn(HConstants.CATALOG_FAMILY, b, b); mutator.mutate(p); if (i % printInterval == 0) { - LOG.info("Put " + printInterval + "/" + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); + LOG.info("Put " + printInterval + "/" + + stopWatch.elapsed(java.util.concurrent.TimeUnit.MILLISECONDS)); stopWatch.reset(); stopWatch.start(); } } - LOG.info("Finished a cycle putting " + namespaceSpan + " in " + - (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); - } + LOG.info("Finished a cycle putting " + namespaceSpan + " in " + + (EnvironmentEdgeManager.currentTime() - startTime) + "ms"); + } } } @@ -824,16 +819,15 @@ public int run(String[] arg0) throws Exception { final long multiPause = 0; // Check args make basic sense.
if ((namespaceSpan < regions) || (regions < servers)) { - throw new IllegalArgumentException("namespaceSpan=" + namespaceSpan + " must be > regions=" + - regions + " which must be > servers=" + servers); + throw new IllegalArgumentException("namespaceSpan=" + namespaceSpan + " must be > regions=" + + regions + " which must be > servers=" + servers); } // Set my many servers and many regions faking connection in place. - getConf().set("hbase.client.connection.impl", - ManyServersManyRegionsConnection.class.getName()); + getConf().set("hbase.client.connection.impl", ManyServersManyRegionsConnection.class.getName()); // Use simple kv registry rather than zk getConf().set("hbase.client.registry.impl", SimpleRegistry.class.getName()); - // When to report fails. Default is we report the 10th. This means we'll see log everytime + // When to report fails. Default is we report the 10th. This means we'll see log everytime // an exception is thrown -- usually RegionTooBusyException when we have more than // hbase.test.multi.too.many requests outstanding at any time. getConf().setInt("hbase.client.start.log.errors.counter", 0); @@ -850,14 +844,14 @@ public int run(String[] arg0) throws Exception { // Have them all share the same connection so they all share the same instance of // ManyServersManyRegionsConnection so I can keep an eye on how many requests by server. - final ExecutorService pool = Executors.newCachedThreadPool( - new ThreadFactoryBuilder().setNameFormat("p-pool-%d") - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); - // Executors.newFixedThreadPool(servers * 10, Threads.getNamedThreadFactory("p")); + final ExecutorService pool = + Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("p-pool-%d") + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + // Executors.newFixedThreadPool(servers * 10, Threads.getNamedThreadFactory("p")); // Share a connection so I can keep counts in the 'server' on concurrency. - final Connection sharedConnection = ConnectionFactory.createConnection(getConf()/*, pool*/); + final Connection sharedConnection = ConnectionFactory.createConnection(getConf()/* , pool */); try { - Thread [] ts = new Thread[clients]; + Thread[] ts = new Thread[clients]; for (int j = 0; j < ts.length; j++) { final int id = j; ts[j] = new Thread("" + j) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java index 244abe011395..68d508900c0e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestClientScanner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -121,8 +121,8 @@ protected boolean moveToNextRegion() { } // Enforce that we don't short-circuit more than once if (rpcFinishedFired) { - throw new RuntimeException("Expected nextScanner to only be called once after " + - " short-circuit was triggered."); + throw new RuntimeException( + "Expected nextScanner to only be called once after " + " short-circuit was triggered."); } rpcFinishedFired = true; return false; @@ -139,38 +139,40 @@ public void testNoResultsHint() throws IOException { final Result[] results = new Result[1]; KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); - results[0] = Result.create(new Cell[] {kv1}); + results[0] = Result.create(new Cell[] { kv1 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { - private int count = 0; - @Override - public Result[] answer(InvocationOnMock invocation) throws Throwable { + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { + private int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable = invocation.getArgument(0); - switch (count) { - case 0: // initialize - count++; - callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.UNKNOWN); - return results; - case 1: // detect no more results - case 2: // close - count++; - return new Result[0]; - default: - throw new RuntimeException("Expected only 2 invocations"); + switch (count) { + case 0: // initialize + count++; + callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.UNKNOWN); + return results; + case 1: // detect no more results + case 2: // close + count++; + return new Result[0]; + default: + throw new RuntimeException("Expected only 2 invocations"); + } } - } - }); + }); // Set a much larger cache and buffer size than we'll provide scan.setCaching(100); - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { scanner.setRpcFinished(true); @@ -180,8 +182,7 @@ public Result[] answer(InvocationOnMock invocation) throws Throwable { // One for fetching the results // One for fetching empty results and quit as we do not have moreResults hint. - inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(1, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -199,31 +200,32 @@ public void testSizeLimit() throws IOException { final Result[] results = new Result[1]; KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); - results[0] = Result.create(new Cell[] {kv1}); + results[0] = Result.create(new Cell[] { kv1 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { - private int count = 0; - @Override - public Result[] answer(InvocationOnMock invocation) throws Throwable { - ScannerCallableWithReplicas callable = invocation.getArgument(0); - switch (count) { - case 0: // initialize - count++; - // if we set no here the implementation will trigger a close - callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES); - return results; - case 1: // close - count++; - return null; - default: - throw new RuntimeException("Expected only 2 invocations"); + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { + private int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgument(0); + switch (count) { + case 0: // initialize + count++; + // if we set no here the implementation will trigger a close + callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES); + return results; + case 1: // close + count++; + return null; + default: + throw new RuntimeException("Expected only 2 invocations"); + } } - } - }); + }); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); @@ -232,14 +234,14 @@ public Result[] answer(InvocationOnMock invocation) throws Throwable { // The single key-value will exit the loop scan.setMaxResultSize(1); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { InOrder inOrder = Mockito.inOrder(caller); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(1)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(1, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -260,48 +262,49 @@ public void testCacheLimit() throws IOException { Type.Maximum); KeyValue kv3 = new KeyValue(Bytes.toBytes("row3"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); - final Result[] results = new Result[] {Result.create(new Cell[] {kv1}), - Result.create(new Cell[] {kv2}), Result.create(new Cell[] {kv3})}; + final Result[] results = new Result[] { Result.create(new Cell[] { kv1 }), + Result.create(new Cell[] { kv2 }), Result.create(new Cell[] { kv3 }) }; RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { - private int count = 0; - @Override - public Result[] answer(InvocationOnMock invocation) throws Throwable { - ScannerCallableWithReplicas callable = invocation.getArgument(0); - switch (count) { - case 0: // initialize - count++; - // if we set no here the implementation will trigger a close - callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES); - return results; - case 1: // close - count++; - return null; - default: - throw new RuntimeException("Expected only 2 invocations"); + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { + private int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgument(0); + switch (count) { + case 0: // initialize + count++; + // if we set no here the implementation will trigger a close + callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.YES); + return results; + case 1: // close + count++; + return null; + default: + throw new RuntimeException("Expected only 2 invocations"); + } } - } - }); + }); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); // Set a small cache scan.setCaching(1); // Set a very large size - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { InOrder inOrder = Mockito.inOrder(caller); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(1)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(3, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -333,47 +336,48 @@ public void testNoMoreResults() throws IOException { final Result[] results = new Result[1]; KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); - results[0] = Result.create(new Cell[] {kv1}); + results[0] = Result.create(new Cell[] { kv1 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. 
newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer() { - private int count = 0; - @Override - public Result[] answer(InvocationOnMock invocation) throws Throwable { - ScannerCallableWithReplicas callable = invocation.getArgument(0); - switch (count) { - case 0: // initialize - count++; - callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.NO); - return results; - case 1: // close - count++; - return null; - default: - throw new RuntimeException("Expected only 2 invocations"); + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer() { + private int count = 0; + + @Override + public Result[] answer(InvocationOnMock invocation) throws Throwable { + ScannerCallableWithReplicas callable = invocation.getArgument(0); + switch (count) { + case 0: // initialize + count++; + callable.currentScannerCallable.setMoreResultsInRegion(MoreResults.NO); + return results; + case 1: // close + count++; + return null; + default: + throw new RuntimeException("Expected only 2 invocations"); + } } - } - }); + }); Mockito.when(rpcFactory. newCaller()).thenReturn(caller); // Set a much larger cache and buffer size than we'll provide scan.setCaching(100); - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { scanner.setRpcFinished(true); InOrder inOrder = Mockito.inOrder(caller); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(1)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(1)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(1, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -391,20 +395,20 @@ public void testMoreResults() throws IOException { final Result[] results1 = new Result[1]; KeyValue kv1 = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); - results1[0] = Result.create(new Cell[] {kv1}); + results1[0] = Result.create(new Cell[] { kv1 }); final Result[] results2 = new Result[1]; KeyValue kv2 = new KeyValue(Bytes.toBytes("row2"), Bytes.toBytes("cf"), Bytes.toBytes("cq"), 1, Type.Maximum); - results2[0] = Result.create(new Cell[] {kv2}); - + results2[0] = Result.create(new Cell[] { kv2 }); RpcRetryingCaller caller = Mockito.mock(RpcRetryingCaller.class); Mockito.when(rpcFactory. 
<Result[]> newCaller()).thenReturn(caller); - Mockito.when(caller.callWithoutRetries(Mockito.any(), - Mockito.anyInt())).thenAnswer(new Answer<Result[]>() { + Mockito.when(caller.callWithoutRetries(Mockito.any(), Mockito.anyInt())) + .thenAnswer(new Answer<Result[]>() { private int count = 0; + @Override public Result[] answer(InvocationOnMock invocation) throws Throwable { ScannerCallableWithReplicas callable = invocation.getArgument(0); @@ -425,21 +429,21 @@ public Result[] answer(InvocationOnMock invocation) throws Throwable { throw new RuntimeException("Expected only 3 invocations"); } } - }); + }); // Set a much larger cache and buffer size than we'll provide scan.setCaching(100); - scan.setMaxResultSize(1000*1000); + scan.setMaxResultSize(1000 * 1000); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, controllerFactory, pool, Integer.MAX_VALUE)) { InOrder inOrder = Mockito.inOrder(caller); scanner.setRpcFinished(true); scanner.loadCache(); - inOrder.verify(caller, Mockito.times(2)).callWithoutRetries( - Mockito.any(), Mockito.anyInt()); + inOrder.verify(caller, Mockito.times(2)).callWithoutRetries(Mockito.any(), Mockito.anyInt()); assertEquals(2, scanner.cache.size()); Result r = scanner.cache.poll(); @@ -473,11 +477,12 @@ public void testExceptionsFromReplicasArePropagated() throws IOException { MockRpcRetryingCallerFactory.class.getName()); // mock 3 replica locations - when(clusterConn.locateRegion((TableName)any(), (byte[])any(), anyBoolean(), - anyBoolean(), anyInt())).thenReturn(new RegionLocations(null, null, null)); + when(clusterConn.locateRegion((TableName) any(), (byte[]) any(), anyBoolean(), anyBoolean(), + anyInt())).thenReturn(new RegionLocations(null, null, null)); - try (MockClientScanner scanner = new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), - clusterConn, rpcFactory, new RpcControllerFactory(conf), pool, Integer.MAX_VALUE)) { + try (MockClientScanner scanner = + new MockClientScanner(conf, scan, TableName.valueOf(name.getMethodName()), clusterConn, + rpcFactory, new RpcControllerFactory(conf), pool, Integer.MAX_VALUE)) { Iterator<Result> iter = scanner.iterator(); while (iter.hasNext()) { iter.next(); @@ -500,6 +505,7 @@ public <T> RpcRetryingCaller<T> newCaller() { @Override public void cancel() { } + @Override public T callWithRetries(RetryingCallable<T> callable, int callTimeout) throws IOException, RuntimeException { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java index 15772324c811..6a8e565e60b2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; @@ -42,9 +43,8 @@ import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -import java.util.Map; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestColumnFamilyDescriptorBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -55,11 +55,9 @@ public class TestColumnFamilyDescriptorBuilder { @Test public void testBuilder() throws DeserializationException { - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY) - .setInMemory(true) - .setScope(HConstants.REPLICATION_SCOPE_LOCAL) - .setBloomFilterType(BloomType.NONE); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY).setInMemory(true) + .setScope(HConstants.REPLICATION_SCOPE_LOCAL).setBloomFilterType(BloomType.NONE); final int v = 123; builder.setBlocksize(v); builder.setTimeToLive(v); @@ -81,13 +79,13 @@ public void testBuilder() throws DeserializationException { builder.setDFSReplication((short) v); ColumnFamilyDescriptor hcd = builder.build(); - byte [] bytes = ColumnFamilyDescriptorBuilder.toByteArray(hcd); + byte[] bytes = ColumnFamilyDescriptorBuilder.toByteArray(hcd); ColumnFamilyDescriptor deserializedHcd = ColumnFamilyDescriptorBuilder.parseFrom(bytes); assertTrue(hcd.equals(deserializedHcd)); assertEquals(v, hcd.getBlocksize()); assertEquals(v, hcd.getTimeToLive()); - assertTrue(Bytes.equals(hcd.getValue(Bytes.toBytes("a")), - deserializedHcd.getValue(Bytes.toBytes("a")))); + assertTrue( + Bytes.equals(hcd.getValue(Bytes.toBytes("a")), deserializedHcd.getValue(Bytes.toBytes("a")))); assertEquals(hcd.getMaxVersions(), deserializedHcd.getMaxVersions()); assertEquals(hcd.getMinVersions(), deserializedHcd.getMinVersions()); assertEquals(hcd.getKeepDeletedCells(), deserializedHcd.getKeepDeletedCells()); @@ -116,8 +114,8 @@ public void testHColumnDescriptorShouldThrowIAEWhenFamilyNameEmpty() throws Exce */ @Test public void testAddGetRemoveConfiguration() { - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); String key = "Some"; String value = "value"; builder.setConfiguration(key, value); @@ -134,11 +132,11 @@ public void testMobValuesInHColumnDescriptorShouldReadable() { // We unify the format of all values saved in the descriptor. // Each value is stored as bytes of string. 
String isMobString = PrettyPrinter.format(String.valueOf(isMob), - HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); + HColumnDescriptor.getUnit(HColumnDescriptor.IS_MOB)); String thresholdString = PrettyPrinter.format(String.valueOf(threshold), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_THRESHOLD)); String policyString = PrettyPrinter.format(Bytes.toStringBinary(Bytes.toBytes(policy)), - HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); + HColumnDescriptor.getUnit(HColumnDescriptor.MOB_COMPACT_PARTITION_POLICY)); assertEquals(String.valueOf(isMob), isMobString); assertEquals(String.valueOf(threshold), thresholdString); assertEquals(String.valueOf(policy), policyString); @@ -146,16 +144,11 @@ public void testMobValuesInHColumnDescriptorShouldReadable() { @Test public void testClassMethodsAreBuilderStyle() { - /* HColumnDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * HColumnDescriptor hcd - * = new HColumnDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * HColumnDescriptor should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: HColumnDescriptor hcd = new HColumnDescriptor() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(ColumnFamilyDescriptorBuilder.class); @@ -164,8 +157,8 @@ public void testClassMethodsAreBuilderStyle() { @Test public void testSetTimeToLive() throws HBaseException { String ttl; - ColumnFamilyDescriptorBuilder builder - = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder builder = + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); ttl = "50000"; builder.setTimeToLive(ttl); @@ -200,7 +193,7 @@ public void testSetTimeToLive() throws HBaseException { public void testSetBlocksize() throws HBaseException { String blocksize; ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("foo")); blocksize = "131072"; builder.setBlocksize(blocksize); @@ -256,7 +249,7 @@ public void testDefaultBuilder() { @Test public void testSetEmptyValue() { ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); + ColumnFamilyDescriptorBuilder.newBuilder(HConstants.CATALOG_FAMILY); String testConf = "TestConfiguration"; String testValue = "TestValue"; // test set value diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java index ac8aed866e68..7d62d5e129a0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestColumnFamilyDescriptorLowerCaseEnum.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,10 @@ public class TestColumnFamilyDescriptorLowerCaseEnum { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestColumnFamilyDescriptorLowerCaseEnum.class); + HBaseClassTestRule.forClass(TestColumnFamilyDescriptorLowerCaseEnum.class); private static final Logger LOG = - LoggerFactory.getLogger(TestColumnFamilyDescriptorLowerCaseEnum.class); + LoggerFactory.getLogger(TestColumnFamilyDescriptorLowerCaseEnum.class); private Method getSetMethod(Method getMethod, Class enumType) throws NoSuchMethodException { String methodName = getMethod.getName().replaceFirst("get", "set"); @@ -71,7 +71,7 @@ public void test() throws IllegalAccessException, InvocationTargetException, NoSuchMethodException { Map> getMethod2Value = new HashMap<>(); ColumnFamilyDescriptorBuilder builder = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test")); for (Method method : ColumnFamilyDescriptor.class.getMethods()) { if (method.getParameterCount() == 0 && method.getReturnType().isEnum()) { LOG.info("Checking " + method); @@ -85,7 +85,7 @@ public void test() } ColumnFamilyDescriptor desc = builder.build(); ColumnFamilyDescriptorBuilder builder2 = - ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test2")); + ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("test2")); desc.getValues().forEach((k, v) -> { LOG.info(k.toString() + "=>" + v.toString()); String str = Bytes.toString(v.get(), v.getOffset(), v.getLength()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java index ed2eb80fe871..2f5519a88880 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionFactoryTracing.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client; import static org.apache.hadoop.hbase.HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY; + import java.io.Closeable; import java.io.IOException; import java.util.concurrent.ExecutionException; @@ -31,6 +32,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, SmallTests.class }) @@ -38,7 +40,7 @@ public class TestConnectionFactoryTracing extends TestTracingBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionFactoryTracing.class); + HBaseClassTestRule.forClass(TestConnectionFactoryTracing.class); private User currentUser; private Object connection; @@ -64,7 +66,7 @@ public void testConnectionTracing() throws IOException { @Test public void testAsyncConnectionTracing() - throws IOException, ExecutionException, InterruptedException { + throws IOException, ExecutionException, InterruptedException { connection = ConnectionFactory.createAsyncConnection(conf, currentUser).get(); assertTrace(ConnectionFactory.class.getSimpleName(), "createAsyncConnection", null, null); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java index 
9cbd0240e9a9..c6a236f8458a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionImplementationTracing.java @@ -28,6 +28,7 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, SmallTests.class }) @@ -35,7 +36,7 @@ public class TestConnectionImplementationTracing extends TestTracingBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionImplementationTracing.class); + HBaseClassTestRule.forClass(TestConnectionImplementationTracing.class); ConnectionImplementation conn; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java index 561b1f5715fd..c29724d4048c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestConnectionRegistryLeak.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ public class TestConnectionRegistryLeak { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionRegistryLeak.class); + HBaseClassTestRule.forClass(TestConnectionRegistryLeak.class); public static final class ConnectionRegistryForTest extends DoNothingConnectionRegistry { @@ -72,7 +72,7 @@ public void close() { @BeforeClass public static void setUp() { CONF.setClass(HConstants.CLIENT_CONNECTION_REGISTRY_IMPL_CONF_KEY, - ConnectionRegistryForTest.class, ConnectionRegistry.class); + ConnectionRegistryForTest.class, ConnectionRegistry.class); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java index b288f98f1f92..6e64e1ec9782 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestCoprocessorDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestCoprocessorDescriptor { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorDescriptor.class); + HBaseClassTestRule.forClass(TestCoprocessorDescriptor.class); private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorDescriptor.class); @@ -55,9 +55,8 @@ public void testBuild() { int priority = 100; String propertyKey = "propertyKey"; String propertyValue = "propertyValue"; - CoprocessorDescriptor cp = - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build(); + CoprocessorDescriptor cp = CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build(); assertEquals(className, cp.getClassName()); assertEquals(path, cp.getJarPath().get()); assertEquals(priority, cp.getPriority()); @@ -73,13 +72,11 @@ public void testSetCoprocessor() throws IOException { String path = "path"; int priority = Math.abs(className.hashCode()); String propertyValue = "propertyValue"; - cps.add( - CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path).setPriority(priority) - .setProperty(propertyKey, propertyValue).build()); + cps.add(CoprocessorDescriptorBuilder.newBuilder(className).setJarPath(path) + .setPriority(priority).setProperty(propertyKey, propertyValue).build()); } - TableDescriptor tableDescriptor = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setCoprocessors(cps).build(); + TableDescriptor tableDescriptor = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessors(cps).build(); for (CoprocessorDescriptor cp : cps) { boolean match = false; for (CoprocessorDescriptor that : tableDescriptor.getCoprocessorDescriptors()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java index 22e718bf95f5..d8c01f1fd489 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDelayingRunner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,7 @@ public class TestDelayingRunner { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDelayingRunner.class); - private static final TableName DUMMY_TABLE = - TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static HRegionInfo hri1 = @@ -47,7 +46,7 @@ public class TestDelayingRunner { @SuppressWarnings({ "rawtypes", "unchecked" }) @Test - public void testDelayingRunner() throws Exception{ + public void testDelayingRunner() throws Exception { MultiAction ma = new MultiAction(); ma.add(hri1.getRegionName(), new Action(new Put(DUMMY_BYTES_1), 0)); final AtomicLong endTime = new AtomicLong(); @@ -63,7 +62,7 @@ public void run() { runner.run(); long delay = endTime.get() - startTime; assertTrue("DelayingRunner did not delay long enough", delay >= sleepTime); - assertFalse("DelayingRunner delayed too long", delay > sleepTime + sleepTime*0.2); + assertFalse("DelayingRunner delayed too long", delay > sleepTime + sleepTime * 0.2); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java index e855055fd889..50a51ad9f249 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestDeleteTimeStamp.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestDeleteTimeStamp { @ClassRule diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java index 38e11c9b457a..dcdcf9c61b7b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestGet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,43 +52,42 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestGet { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGet.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGet.class); - private static final byte [] ROW = new byte [] {'r'}; + private static final byte[] ROW = new byte[] { 'r' }; private static final String PB_GET = "CgNyb3ciEwoPdGVzdC5Nb2NrRmlsdGVyEgAwATgB"; private static final String PB_GET_WITH_FILTER_LIST = - "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + - "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + - "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; + "CgFyIosBCilvcmcuYXBhY2hlLmhhZG9vcC5oYmFzZS5maWx0ZXIuRmlsdGVyTGlzdBJeCAESEwoP" + + "dGVzdC5Nb2NrRmlsdGVyEgASEQoNbXkuTW9ja0ZpbHRlchIAEjIKLG9yZy5hcGFjaGUuaGFkb29w" + + "LmhiYXNlLmZpbHRlci5LZXlPbmx5RmlsdGVyEgIIADABOAE="; private static final String MOCK_FILTER_JAR = - "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + - "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + - "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + - "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + - "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + - "0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + - "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + - "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + - "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + - "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + - "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + - "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + - "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + - "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + - "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + - "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + - "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + - "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + - "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + - "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + - "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + - "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; + "UEsDBBQACAgIANWDlEMAAAAAAAAAAAAAAAAJAAQATUVUQS1JTkYv/soAAAMAUEsHCAAAAAACAAAA" + + "AAAAAFBLAwQUAAgICADVg5RDAAAAAAAAAAAAAAAAFAAAAE1FVEEtSU5GL01BTklGRVNULk1G803M" + + "y0xLLS7RDUstKs7Mz7NSMNQz4OVyLkpNLElN0XWqBAmY6xnEG1gqaPgXJSbnpCo45xcV5BcllgCV" + + "a/Jy8XIBAFBLBwgxyqRbQwAAAEQAAABQSwMEFAAICAgAUoOUQwAAAAAAAAAAAAAAABMAAABteS9N" + + "b2NrRmlsdGVyLmNsYXNzdZHPTsJAEMa/LYVCRVFQMd68gQc38YrxUJUTetGQGE7bstrVwjbbYsSn" + + 
"0hOJJj6AD2WcFoP/4iYzX+bb32xmd9/en18B7GPLhY11BxsurEw3GUoHaqzSQ4ZCq91nsI/0UDLU" + + "emoszyYjX5oL4Ufk1Hs6EFFfGJXVn6adhirJ6NGUn+rgtquiVJoOQyUWJpFdo0cMjdbAa/8hnNj3" + + "pqmkbmvgMbgn94GMU6XHiYMm1ed6YgJJeDbNV+fejbgTVRRRYlj+cSZDW5trLmIRhJKHYqh1zENf" + + "JJJf5QCfcx45DJ3/WLmYgx/LRNJ1I/UgMmMxIXbo9WxkywLLZqHsUMVJGWlxdwb2lG+XKZdys4kK" + + "5eocgIsl0grVy0Q5+e9Y+V75BdblDIXHX/3b3/rLWEGNdJXCJmeNop7zjQ9QSwcI1kzyMToBAADs" + + "AQAAUEsDBBQACAgIAFKDlEMAAAAAAAAAAAAAAAAVAAAAdGVzdC9Nb2NrRmlsdGVyLmNsYXNzdVHB" + + "TsJAFJwthUJFERQx3ryBBzfxivFQlRN60ZAYTtuy2tXCNtti1K/SE4kmfoAfZXwtBg3RTd6bzOy8" + + "zezux+frO4ADbLuwsemg6cLKcIuhdKgmKj1iKLQ7Awb7WI8kQ62vJvJ8OvaluRR+REqjrwMRDYRR" + + "Gf8W7TRUCUO9n8ok5Wc6uOupKJWmy1CJhUlkz+gxQ7M99Dp/eJzY9x5JZrCGHoN7+hDIOFV6kjho" + + "Eb/QUxNIsmeJfib3b8W9qKKIEslLpzJ0tLnhIhZBKHkoRlrHPPRFIvl1buBzn0cKQ/c/r1wk4Scy" + + "kXTpSD2JTFhkxC69oY1sWWBZGuoOMU7ICIt7M7CXfLtMvZSLLVSoV+cGuFghrBBfJZeT/5GV75Xf" + + "YF3NUHhemt/5NV/GGmqE61Q2KXWqRu7f+AJQSwcIrS5nKDoBAADyAQAAUEsBAhQAFAAICAgA1YOU" + + "QwAAAAACAAAAAAAAAAkABAAAAAAAAAAAAAAAAAAAAE1FVEEtSU5GL/7KAABQSwECFAAUAAgICADV" + + "g5RDMcqkW0MAAABEAAAAFAAAAAAAAAAAAAAAAAA9AAAATUVUQS1JTkYvTUFOSUZFU1QuTUZQSwEC" + + "FAAUAAgICABSg5RD1kzyMToBAADsAQAAEwAAAAAAAAAAAAAAAADCAAAAbXkvTW9ja0ZpbHRlci5j" + + "bGFzc1BLAQIUABQACAgIAFKDlEOtLmcoOgEAAPIBAAAVAAAAAAAAAAAAAAAAAD0CAAB0ZXN0L01v" + + "Y2tGaWx0ZXIuY2xhc3NQSwUGAAAAAAQABAABAQAAugMAAAAA"; @Test public void testAttributesSerialization() throws IOException { @@ -121,22 +120,22 @@ public void testGetAttributes() { get.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - get.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), get.getAttributesMap().get("attribute1"))); // overriding attribute value get.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), get.getAttribute("attribute1"))); Assert.assertEquals(1, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - get.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), get.getAttributesMap().get("attribute1"))); // adding another attribute get.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), get.getAttribute("attribute2"))); Assert.assertEquals(2, get.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - get.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), get.getAttributesMap().get("attribute2"))); // removing attribute get.setAttribute("attribute2", null); @@ -209,16 +208,14 @@ public void TestGetRowFromGetCopyConstructor() throws Exception { @Test public void testDynamicFilter() throws Exception { Configuration conf = HBaseConfiguration.create(); - String localPath = conf.get("hbase.local.dir") - + File.separator + "jars" + File.separator; + String localPath = conf.get("hbase.local.dir") + File.separator + "jars" + File.separator; File jarFile = new File(localPath, "MockFilter.jar"); jarFile.delete(); assertFalse("Should be deleted: " + jarFile.getPath(), jarFile.exists()); - ClientProtos.Get getProto1 = - ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); + ClientProtos.Get getProto1 = 
ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET)); ClientProtos.Get getProto2 = - ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET_WITH_FILTER_LIST)); + ClientProtos.Get.parseFrom(Base64.getDecoder().decode(PB_GET_WITH_FILTER_LIST)); try { ProtobufUtil.toGet(getProto1); fail("Should not be able to load the filter class"); @@ -230,9 +227,8 @@ public void testDynamicFilter() throws Exception { fail("Should not be able to load the filter class"); } catch (IOException ioe) { assertTrue(ioe.getCause() instanceof InvocationTargetException); - InvocationTargetException ite = (InvocationTargetException)ioe.getCause(); - assertTrue(ite.getTargetException() - instanceof DeserializationException); + InvocationTargetException ite = (InvocationTargetException) ioe.getCause(); + assertTrue(ite.getTargetException() instanceof DeserializationException); } FileOutputStream fos = new FileOutputStream(jarFile); fos.write(Base64.getDecoder().decode(MOCK_FILTER_JAR)); @@ -243,7 +239,7 @@ public void testDynamicFilter() throws Exception { Get get2 = ProtobufUtil.toGet(getProto2); assertTrue(get2.getFilter() instanceof FilterList); - List filters = ((FilterList)get2.getFilter()).getFilters(); + List filters = ((FilterList) get2.getFilter()).getFilters(); assertEquals(3, filters.size()); assertEquals("test.MockFilter", filters.get(0).getClass().getName()); assertEquals("my.MockFilter", filters.get(1).getClass().getName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java index cce4939279dd..412b3dbbd95a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableMultiplexerViaMocks.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,8 @@ public void setupTest() { } @SuppressWarnings("deprecation") - @Test public void testConnectionClosing() throws IOException { + @Test + public void testConnectionClosing() throws IOException { doCallRealMethod().when(mockMultiplexer).close(); // If the connection is not closed when(mockConnection.isClosed()).thenReturn(false); @@ -69,7 +70,8 @@ public void setupTest() { } @SuppressWarnings("deprecation") - @Test public void testClosingAlreadyClosedConnection() throws IOException { + @Test + public void testClosingAlreadyClosedConnection() throws IOException { doCallRealMethod().when(mockMultiplexer).close(); // If the connection is already closed when(mockConnection.isClosed()).thenReturn(true); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java index a4adfe5988af..0aa4841d7162 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestHTableTracing.java @@ -38,6 +38,7 @@ import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; @@ -70,8 +71,10 @@ import org.junit.experimental.categories.Category; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ClientService; @@ -92,7 +95,7 @@ public class TestHTableTracing extends TestTracingBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHTableTracing.class); + HBaseClassTestRule.forClass(TestHTableTracing.class); private ClientProtos.ClientService.BlockingInterface stub; private ConnectionImplementation conn; @@ -112,22 +115,19 @@ public void setUp() throws Exception { public ScanResponse answer(InvocationOnMock invocation) throws Throwable { ScanRequest req = invocation.getArgument(1); if (!req.hasScannerId()) { - return ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .setMoreResultsInRegion(true).setMoreResults(true).build(); + return ScanResponse.newBuilder().setScannerId(1).setTtl(800).setMoreResultsInRegion(true) + .setMoreResults(true).build(); } else { if (req.hasCloseScanner() && req.getCloseScanner()) { return ScanResponse.getDefaultInstance(); } else { Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put) - .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) - .setFamily(Bytes.toBytes("cf")) - .setQualifier(Bytes.toBytes("cq")) - .setValue(Bytes.toBytes("v")) - .build(); + .setType(Cell.Type.Put).setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())) + .setFamily(Bytes.toBytes("cf")).setQualifier(Bytes.toBytes("cq")) + .setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); ScanResponse.Builder builder = ScanResponse.newBuilder().setScannerId(1).setTtl(800) - .addResults(ProtobufUtil.toResult(result)); + 
.addResults(ProtobufUtil.toResult(result)); if (req.getLimitOfRows() == 1) { builder.setMoreResultsInRegion(false).setMoreResults(false); } else { @@ -159,15 +159,13 @@ public MutateResponse answer(InvocationOnMock invocation) throws Throwable { case INCREMENT: ColumnValue value = req.getColumnValue(0); QualifierValue qvalue = value.getQualifierValue(0); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put) - .setRow(req.getRow().toByteArray()) - .setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()) - .build(); + Cell cell = + CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); resp = MutateResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell)))).build(); break; default: resp = MutateResponse.getDefaultInstance(); @@ -183,47 +181,45 @@ public GetResponse answer(InvocationOnMock invocation) throws Throwable { ClientProtos.Get req = ((GetRequest) invocation.getArgument(1)).getGet(); ColumnValue value = ColumnValue.getDefaultInstance(); QualifierValue qvalue = QualifierValue.getDefaultInstance(); - Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(Cell.Type.Put) - .setRow(req.getRow().toByteArray()) - .setFamily(value.getFamily().toByteArray()) - .setQualifier(qvalue.getQualifier().toByteArray()) - .setValue(qvalue.getValue().toByteArray()) - .build(); + Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) + .setRow(req.getRow().toByteArray()).setFamily(value.getFamily().toByteArray()) + .setQualifier(qvalue.getQualifier().toByteArray()) + .setValue(qvalue.getValue().toByteArray()).build(); return GetResponse.newBuilder() - .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell), true))).build(); + .setResult(ProtobufUtil.toResult(Result.create(Arrays.asList(cell), true))).build(); } }).when(stub).get(any(HBaseRpcController.class), any(GetRequest.class)); conn = - spy(new ConnectionImplementation(conf, null, UserProvider.instantiate(conf).getCurrent()) { - @Override - public RegionLocator getRegionLocator(TableName tableName) throws IOException { - RegionLocator locator = mock(HRegionLocator.class); - Answer answer = new Answer() { - - @Override public HRegionLocation answer(InvocationOnMock invocation) throws Throwable { - TableName tableName = TableName.META_TABLE_NAME; - RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); - ServerName serverName = MASTER_HOST; - HRegionLocation loc = new HRegionLocation(info, serverName); - return loc; - } - }; - doAnswer(answer).when(locator) - .getRegionLocation(any(byte[].class), anyInt(), anyBoolean()); - doAnswer(answer).when(locator).getRegionLocation(any(byte[].class)); - doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyInt()); - doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyBoolean()); - return locator; - } + spy(new ConnectionImplementation(conf, null, UserProvider.instantiate(conf).getCurrent()) { + @Override + public RegionLocator getRegionLocator(TableName tableName) throws IOException { + RegionLocator locator = mock(HRegionLocator.class); + Answer 
answer = new Answer() { + + @Override + public HRegionLocation answer(InvocationOnMock invocation) throws Throwable { + TableName tableName = TableName.META_TABLE_NAME; + RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); + ServerName serverName = MASTER_HOST; + HRegionLocation loc = new HRegionLocation(info, serverName); + return loc; + } + }; + doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyInt(), + anyBoolean()); + doAnswer(answer).when(locator).getRegionLocation(any(byte[].class)); + doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyInt()); + doAnswer(answer).when(locator).getRegionLocation(any(byte[].class), anyBoolean()); + return locator; + } - @Override - public ClientService.BlockingInterface getClient(ServerName serverName) - throws IOException { - return stub; - } - }); + @Override + public ClientService.BlockingInterface getClient(ServerName serverName) + throws IOException { + return stub; + } + }); // this setup of AsyncProcess is for MultiResponse AsyncProcess asyncProcess = mock(AsyncProcess.class); AsyncRequestFuture asyncRequestFuture = mock(AsyncRequestFuture.class); @@ -247,32 +243,25 @@ private void assertTrace(String tableOperation, Matcher matcher) { // n.b. this method implementation must match the one of the same name found in // TestAsyncTableTracing final TableName tableName = table.getName(); - final Matcher spanLocator = allOf( - hasName(containsString(tableOperation)), hasEnded()); + final Matcher spanLocator = + allOf(hasName(containsString(tableOperation)), hasEnded()); final String expectedName = tableOperation + " " + tableName.getNameWithNamespaceInclAsString(); - Waiter.waitFor(conf, 1000, new MatcherPredicate<>( - "waiting for span to emit", - () -> TRACE_RULE.getSpans(), hasItem(spanLocator))); - List candidateSpans = TRACE_RULE.getSpans() - .stream() - .filter(spanLocator::matches) - .collect(Collectors.toList()); + Waiter.waitFor(conf, 1000, new MatcherPredicate<>("waiting for span to emit", + () -> TRACE_RULE.getSpans(), hasItem(spanLocator))); + List candidateSpans = + TRACE_RULE.getSpans().stream().filter(spanLocator::matches).collect(Collectors.toList()); assertThat(candidateSpans, hasSize(1)); SpanData data = candidateSpans.iterator().next(); - assertThat(data, allOf( - hasName(expectedName), - hasKind(SpanKind.CLIENT), - hasStatusWithCode(StatusCode.OK), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(tableName), - matcher)); + assertThat(data, + allOf(hasName(expectedName), hasKind(SpanKind.CLIENT), hasStatusWithCode(StatusCode.OK), + buildConnectionAttributesMatcher(conn), buildTableAttributesMatcher(tableName), matcher)); } @Test public void testPut() throws IOException { - table.put(new Put(Bytes.toBytes(0)) - .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v"))); + table.put(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), + Bytes.toBytes("v"))); assertTrace("PUT"); } @@ -324,89 +313,82 @@ public void testIncrementColumnValue2() throws IOException { @Test public void testCheckAndMutate() throws IOException { table.checkAndMutate(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0)))); - assertTrace("CHECK_AND_MUTATE", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + 
.build(new Delete(Bytes.toBytes(0)))); + assertTrace("CHECK_AND_MUTATE", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test public void testCheckAndMutateList() throws IOException { table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test public void testCheckAndMutateAll() throws IOException { table.checkAndMutate(Arrays.asList(CheckAndMutate.newBuilder(Bytes.toBytes(0)) - .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) - .build(new Delete(Bytes.toBytes(0))))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "CHECK_AND_MUTATE", "DELETE"))); + .ifEquals(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")) + .build(new Delete(Bytes.toBytes(0))))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", + "CHECK_AND_MUTATE", "DELETE"))); } @Test public void testMutateRow() throws Exception { byte[] row = Bytes.toBytes(0); table.mutateRow(RowMutations.of(Arrays.asList(new Delete(row)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testExistsList() throws IOException { table.exists(Arrays.asList(new Get(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testExistsAll() throws IOException { table.existsAll(Arrays.asList(new Get(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testGetList() throws IOException { table.get(Arrays.asList(new Get(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "GET"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "GET"))); } @Test public void testPutList() throws IOException { table.put(Arrays.asList(new Put(Bytes.toBytes(0)).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("v")))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "PUT"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "PUT"))); } @Test public void testDeleteList() throws IOException { table.delete(Lists.newArrayList(new Delete(Bytes.toBytes(0)))); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - 
"db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test public void testBatchList() throws IOException, InterruptedException { table.batch(Arrays.asList(new Delete(Bytes.toBytes(0))), null); - assertTrace("BATCH", hasAttributes( - containsEntryWithStringValuesOf( - "db.hbase.container_operations", "DELETE"))); + assertTrace("BATCH", + hasAttributes(containsEntryWithStringValuesOf("db.hbase.container_operations", "DELETE"))); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java index 1ac483da91f1..73cb4d046d01 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHColumnDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestImmutableHColumnDescriptor { @ClassRule @@ -49,44 +49,31 @@ public class TestImmutableHColumnDescriptor { @Rule public TestName name = new TestName(); private static final List> TEST_FUNCTION = Arrays.asList( - hcd -> hcd.setValue("a", "a"), - hcd -> hcd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), - hcd -> hcd.setConfiguration("aaa", "ccc"), - hcd -> hcd.remove(Bytes.toBytes("aaa")), - hcd -> hcd.removeConfiguration("xxx"), - hcd -> hcd.setBlockCacheEnabled(false), - hcd -> hcd.setBlocksize(10), - hcd -> hcd.setBloomFilterType(BloomType.NONE), - hcd -> hcd.setCacheBloomsOnWrite(false), - hcd -> hcd.setCacheDataOnWrite(true), + hcd -> hcd.setValue("a", "a"), hcd -> hcd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), + hcd -> hcd.setConfiguration("aaa", "ccc"), hcd -> hcd.remove(Bytes.toBytes("aaa")), + hcd -> hcd.removeConfiguration("xxx"), hcd -> hcd.setBlockCacheEnabled(false), + hcd -> hcd.setBlocksize(10), hcd -> hcd.setBloomFilterType(BloomType.NONE), + hcd -> hcd.setCacheBloomsOnWrite(false), hcd -> hcd.setCacheDataOnWrite(true), hcd -> hcd.setCacheIndexesOnWrite(true), hcd -> hcd.setCompactionCompressionType(Compression.Algorithm.LZO), - hcd -> hcd.setCompressTags(true), - hcd -> hcd.setCompressionType(Compression.Algorithm.LZO), + hcd -> hcd.setCompressTags(true), hcd -> hcd.setCompressionType(Compression.Algorithm.LZO), hcd -> hcd.setDFSReplication((short) 10), hcd -> hcd.setDataBlockEncoding(DataBlockEncoding.NONE), - hcd -> hcd.setEncryptionKey(Bytes.toBytes("xxx")), - hcd -> hcd.setEncryptionType("xxx"), - hcd -> hcd.setEvictBlocksOnClose(true), - hcd -> hcd.setInMemory(true), + hcd -> hcd.setEncryptionKey(Bytes.toBytes("xxx")), hcd -> hcd.setEncryptionType("xxx"), + hcd -> hcd.setEvictBlocksOnClose(true), hcd -> hcd.setInMemory(true), hcd -> hcd.setInMemoryCompaction(MemoryCompactionPolicy.NONE), - hcd -> hcd.setKeepDeletedCells(KeepDeletedCells.FALSE), - hcd -> hcd.setMaxVersions(1000), + hcd -> hcd.setKeepDeletedCells(KeepDeletedCells.FALSE), hcd -> hcd.setMaxVersions(1000), hcd -> hcd.setMinVersions(10), hcd -> 
hcd.setMobCompactPartitionPolicy(MobCompactPartitionPolicy.DAILY), - hcd -> hcd.setMobEnabled(true), - hcd -> hcd.setMobThreshold(10), - hcd -> hcd.setPrefetchBlocksOnOpen(true), - hcd -> hcd.setScope(0), - hcd -> hcd.setStoragePolicy("aaa"), - hcd -> hcd.setTimeToLive(100), - hcd -> hcd.setVersions(1, 10) - ); + hcd -> hcd.setMobEnabled(true), hcd -> hcd.setMobThreshold(10), + hcd -> hcd.setPrefetchBlocksOnOpen(true), hcd -> hcd.setScope(0), + hcd -> hcd.setStoragePolicy("aaa"), hcd -> hcd.setTimeToLive(100), + hcd -> hcd.setVersions(1, 10)); @Test public void testImmutable() { - ImmutableHColumnDescriptor hcd = new ImmutableHColumnDescriptor( - new HColumnDescriptor(Bytes.toBytes(name.getMethodName()))); + ImmutableHColumnDescriptor hcd = + new ImmutableHColumnDescriptor(new HColumnDescriptor(Bytes.toBytes(name.getMethodName()))); for (int i = 0; i != TEST_FUNCTION.size(); ++i) { try { TEST_FUNCTION.get(i).accept(hcd); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java index 68afeec26638..f856f47223c6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHRegionInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ /** * Test ImmutableHRegionInfo */ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestImmutableHRegionInfo { @ClassRule @@ -46,10 +46,8 @@ public class TestImmutableHRegionInfo { @Rule public TestName name = new TestName(); - private final List> TEST_FUNCTIONS = Arrays.asList( - hri -> hri.setOffline(true), - hri -> hri.setSplit(true) - ); + private final List> TEST_FUNCTIONS = + Arrays.asList(hri -> hri.setOffline(true), hri -> hri.setSplit(true)); @Test public void testImmutable() { @@ -60,7 +58,7 @@ public void testImmutable() { try { f.accept(immutableHri); fail("ImmutableHRegionInfo can't be modified !!!"); - } catch(UnsupportedOperationException e) { + } catch (UnsupportedOperationException e) { } }); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java index b83c01ab29ae..87a3f7ba85b0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableHTableDescriptor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestImmutableHTableDescriptor { @ClassRule @@ -47,35 +47,24 @@ public class TestImmutableHTableDescriptor { @Rule public TestName name = new TestName(); private static final List> TEST_FUNCTION = Arrays.asList( - htd -> htd.setValue("a", "a"), - htd -> htd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), + htd -> htd.setValue("a", "a"), htd -> htd.setValue(Bytes.toBytes("a"), Bytes.toBytes("a")), htd -> htd.setValue(new Bytes(Bytes.toBytes("a")), new Bytes(Bytes.toBytes("a"))), - htd -> htd.setCompactionEnabled(false), - htd -> htd.setConfiguration("aaa", "ccc"), - htd -> htd.setDurability(Durability.USE_DEFAULT), - htd -> htd.setFlushPolicyClassName("class"), - htd -> htd.setMaxFileSize(123), - htd -> htd.setMemStoreFlushSize(123123123), - htd -> htd.setNormalizationEnabled(false), - htd -> htd.setPriority(123), - htd -> htd.setReadOnly(true), - htd -> htd.setRegionMemstoreReplication(true), - htd -> htd.setRegionReplication(123), - htd -> htd.setRegionSplitPolicyClassName("class"), + htd -> htd.setCompactionEnabled(false), htd -> htd.setConfiguration("aaa", "ccc"), + htd -> htd.setDurability(Durability.USE_DEFAULT), htd -> htd.setFlushPolicyClassName("class"), + htd -> htd.setMaxFileSize(123), htd -> htd.setMemStoreFlushSize(123123123), + htd -> htd.setNormalizationEnabled(false), htd -> htd.setPriority(123), + htd -> htd.setReadOnly(true), htd -> htd.setRegionMemstoreReplication(true), + htd -> htd.setRegionReplication(123), htd -> htd.setRegionSplitPolicyClassName("class"), htd -> htd.addFamily(new HColumnDescriptor(Bytes.toBytes("fm"))), - htd -> htd.remove(new Bytes(Bytes.toBytes("aaa"))), - htd -> htd.remove("aaa"), - htd -> htd.remove(Bytes.toBytes("aaa")), - htd -> htd.removeConfiguration("xxx"), - htd -> htd.removeFamily(Bytes.toBytes("fm")), - htd -> { + htd -> htd.remove(new Bytes(Bytes.toBytes("aaa"))), htd -> htd.remove("aaa"), + htd -> htd.remove(Bytes.toBytes("aaa")), htd -> htd.removeConfiguration("xxx"), + htd -> htd.removeFamily(Bytes.toBytes("fm")), htd -> { try { htd.addCoprocessor("xxx"); } catch (IOException e) { throw new RuntimeException(e); } - } - ); + }); @Test public void testImmutable() { @@ -113,18 +102,13 @@ private void assertReadOnly(HColumnDescriptor hcd) { @Test public void testClassMethodsAreBuilderStyle() { - /* ImmutableHTableDescriptor should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * ImmutableHTableDescriptor d - * = new ImmutableHTableDescriptor() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object - */ + /* + * ImmutableHTableDescriptor should have a builder style setup where setXXX/addXXX methods can + * be chainable together: . 
For example: ImmutableHTableDescriptor d = new + * ImmutableHTableDescriptor() .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all + * methods starting with "set" returns the declaring object + */ - BuilderStyleTest.assertClassesAreBuilderStyle(ImmutableHTableDescriptor.class); + BuilderStyleTest.assertClassesAreBuilderStyle(ImmutableHTableDescriptor.class); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java index 09b9add8ca00..2078b767eb52 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestImmutableScan.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,9 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.client; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; + import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; @@ -39,8 +40,6 @@ import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; /** * Small tests for ImmutableScan @@ -50,7 +49,7 @@ public class TestImmutableScan { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestImmutableScan.class); + HBaseClassTestRule.forClass(TestImmutableScan.class); private static final Logger LOG = LoggerFactory.getLogger(TestImmutableScan.class); @@ -59,37 +58,18 @@ public void testScanCopyConstructor() throws Exception { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user2", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setSmall(true) - .setReadType(Scan.ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) - .setTimeRange(0, 13); + .setACL("test_user2", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED) + 
.setLimit(100).setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100) + .setMaxResultsPerColumnFamily(1000).readVersions(9999).setMvccReadPoint(5) + .setNeedCursorResult(true).setPriority(1).setRaw(true).setReplicaId(3).setReversed(true) + .setRowOffsetPerColumnFamily(5).setStartStopRowForPrefixScan(Bytes.toBytes("row_")) + .setScanMetricsEnabled(true).setSmall(true).setReadType(Scan.ReadType.STREAM) + .withStartRow(Bytes.toBytes("row_1")).withStopRow(Bytes.toBytes("row_2")) + .setTimeRange(0, 13); // create a copy of existing scan object Scan scanCopy = new ImmutableScan(scan); @@ -210,8 +190,7 @@ private void testUnmodifiableSetters(Scan scanCopy) throws IOException { scanCopy.setCaching(1); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setCaching", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setCaching", e.getMessage()); } try { scanCopy.setLoadColumnFamiliesOnDemand(true); @@ -302,8 +281,7 @@ private void testUnmodifiableSetters(Scan scanCopy) throws IOException { scanCopy.setAllowPartialResults(true); throw new RuntimeException("Should not reach here"); } catch (UnsupportedOperationException e) { - assertEquals("ImmutableScan does not allow access to setAllowPartialResults", - e.getMessage()); + assertEquals("ImmutableScan does not allow access to setAllowPartialResults", e.getMessage()); } try { scanCopy.setId("id"); @@ -386,8 +364,7 @@ private static boolean isGetter(Method method) { || method.getName().startsWith("set")) { return false; } - return !void.class.equals(method.getReturnType()) - && !Scan.class.equals(method.getReturnType()); + return !void.class.equals(method.getReturnType()) && !Scan.class.equals(method.getReturnType()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java index 75bad5ea416f..f63ffc2fdefa 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestIncrement.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestIncrement { @ClassRule @@ -39,17 +39,17 @@ public class TestIncrement { @Test public void testIncrementInstance() { final long expected = 13; - Increment inc = new Increment(new byte [] {'r'}); + Increment inc = new Increment(new byte[] { 'r' }); int total = 0; for (int i = 0; i < 2; i++) { - byte [] bytes = Bytes.toBytes(i); + byte[] bytes = Bytes.toBytes(i); inc.addColumn(bytes, bytes, expected); total++; } - Map> familyMapOfLongs = inc.getFamilyMapOfLongs(); + Map> familyMapOfLongs = inc.getFamilyMapOfLongs(); int found = 0; - for (Map.Entry> entry: familyMapOfLongs.entrySet()) { - for (Map.Entry e: entry.getValue().entrySet()) { + for (Map.Entry> entry : familyMapOfLongs.entrySet()) { + for (Map.Entry e : entry.getValue().entrySet()) { assertEquals(expected, e.getValue().longValue()); found++; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java index 953fba777eb1..72973bb31786 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestInterfaceAlign.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ public class TestInterfaceAlign { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInterfaceAlign.class); + HBaseClassTestRule.forClass(TestInterfaceAlign.class); /** * Test methods name match up @@ -76,8 +76,8 @@ public void testAdminWithAsyncAdmin() { private List getMethodNames(Class c) { // DON'T use the getDeclaredMethods as we want to check the Public APIs only. return Arrays.asList(c.getMethods()).stream().filter(m -> !isDeprecated(m)) - .filter(m -> !Modifier.isStatic(m.getModifiers())).map(Method::getName).distinct() - .collect(Collectors.toList()); + .filter(m -> !Modifier.isStatic(m.getModifiers())).map(Method::getName).distinct() + .collect(Collectors.toList()); } private boolean isDeprecated(Method method) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java index d48806def23d..e76ff892ae45 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMetricsConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType; -@Category({ClientTests.class, MetricsTests.class, SmallTests.class}) +@Category({ ClientTests.class, MetricsTests.class, SmallTests.class }) public class TestMetricsConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -56,7 +56,8 @@ public class TestMetricsConnection { private static MetricsConnection METRICS; private static final ThreadPoolExecutor BATCH_POOL = - (ThreadPoolExecutor) Executors.newFixedThreadPool(2); + (ThreadPoolExecutor) Executors.newFixedThreadPool(2); + @BeforeClass public static void beforeClass() { METRICS = new MetricsConnection("mocked-connection", () -> BATCH_POOL, () -> null); @@ -70,71 +71,54 @@ public static void afterClass() { @Test public void testStaticMetrics() throws IOException { final byte[] foo = Bytes.toBytes("foo"); - final RegionSpecifier region = RegionSpecifier.newBuilder() - .setValue(ByteString.EMPTY) - .setType(RegionSpecifierType.REGION_NAME) - .build(); + final RegionSpecifier region = RegionSpecifier.newBuilder().setValue(ByteString.EMPTY) + .setType(RegionSpecifierType.REGION_NAME).build(); final int loop = 5; for (int i = 0; i < loop; i++) { - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Get"), - GetRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Scan"), - ScanRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Multi"), - MultiRequest.getDefaultInstance(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); - METRICS.updateRpc( - ClientService.getDescriptor().findMethodByName("Mutate"), - MutateRequest.newBuilder() - .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))) - .setRegion(region) - .build(), - MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Get"), + GetRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Scan"), + ScanRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Multi"), + MultiRequest.getDefaultInstance(), MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.APPEND, new Append(foo))) + 
.setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, new Delete(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.INCREMENT, new Increment(foo))) + .setRegion(region).build(), + MetricsConnection.newCallStats()); + METRICS.updateRpc(ClientService.getDescriptor().findMethodByName("Mutate"), + MutateRequest.newBuilder() + .setMutation(ProtobufUtil.toMutation(MutationType.PUT, new Put(foo))).setRegion(region) + .build(), + MetricsConnection.newCallStats()); } - for (String method: new String[]{"Get", "Scan", "Mutate"}) { + for (String method : new String[] { "Get", "Scan", "Mutate" }) { final String metricKey = "rpcCount_" + ClientService.getDescriptor().getName() + "_" + method; final long metricVal = METRICS.rpcCounters.get(metricKey).getCount(); assertTrue("metric: " + metricKey + " val: " + metricVal, metricVal >= loop); } - for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { - METRICS.getTracker, METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, - METRICS.deleteTracker, METRICS.incrementTracker, METRICS.putTracker - }) { + for (MetricsConnection.CallTracker t : new MetricsConnection.CallTracker[] { METRICS.getTracker, + METRICS.scanTracker, METRICS.multiTracker, METRICS.appendTracker, METRICS.deleteTracker, + METRICS.incrementTracker, METRICS.putTracker }) { assertEquals("Failed to invoke callTimer on " + t, loop, t.callTimer.getCount()); assertEquals("Failed to invoke reqHist on " + t, loop, t.reqHist.getCount()); assertEquals("Failed to invoke respHist on " + t, loop, t.respHist.getCount()); } - RatioGauge executorMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getExecutorPoolName()); - RatioGauge metaMetrics = (RatioGauge) METRICS.getMetricRegistry() - .getMetrics().get(METRICS.getMetaPoolName()); + RatioGauge executorMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getExecutorPoolName()); + RatioGauge metaMetrics = + (RatioGauge) METRICS.getMetricRegistry().getMetrics().get(METRICS.getMetaPoolName()); assertEquals(Ratio.of(0, 3).getValue(), executorMetrics.getValue(), 0); assertEquals(Double.NaN, metaMetrics.getValue(), 0); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java index 99699a4fea6f..c197eb35decc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestMutation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,20 +50,16 @@ public void testAppendCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), Bytes.toBytes("value")); origin.setTimeRange(100, 1000); Append clone = new Append(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), Bytes.toBytes("value")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -73,20 +69,16 @@ public void testIncrementCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes(100)) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes(100)).build()); origin.addColumn(family, Bytes.toBytes("q0"), 4); origin.setTimeRange(100, 1000); Increment clone = new Increment(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q1"), 3); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -96,12 +88,8 @@ public void testDeleteCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Type.Delete) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Type.Delete).build()); origin.addColumn(family, Bytes.toBytes("q0")); origin.addColumns(family, Bytes.toBytes("q1")); origin.addFamily(family); @@ -111,7 +99,7 @@ public void testDeleteCopyConstructor() throws IOException { assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q3")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -121,20 +109,16 @@ public void testPutCopyConstructor() throws IOException { origin.setPriority(100); byte[] family = Bytes.toBytes("CF-01"); - origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(origin.getRow()) - .setFamily(family) - .setQualifier(Bytes.toBytes("q")) - .setType(Cell.Type.Put) - .setValue(Bytes.toBytes("value")) - .build()); + origin.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(origin.getRow()) + .setFamily(family).setQualifier(Bytes.toBytes("q")).setType(Cell.Type.Put) + .setValue(Bytes.toBytes("value")).build()); origin.addColumn(family, 
Bytes.toBytes("q0"), Bytes.toBytes("V-01")); origin.addColumn(family, Bytes.toBytes("q1"), 100, Bytes.toBytes("V-01")); Put clone = new Put(origin); assertEquals(origin, clone); origin.addColumn(family, Bytes.toBytes("q2"), Bytes.toBytes("V-02")); - //They should have different cell lists + // They should have different cell lists assertNotEquals(origin.getCellList(family), clone.getCellList(family)); } @@ -160,10 +144,10 @@ private void assertEquals(Mutation origin, Mutation clone) { Assert.assertEquals(origin.getTimestamp(), clone.getTimestamp()); Assert.assertEquals(origin.getPriority(), clone.getPriority()); if (origin instanceof Append) { - assertEquals(((Append)origin).getTimeRange(), ((Append)clone).getTimeRange()); + assertEquals(((Append) origin).getTimeRange(), ((Append) clone).getTimeRange()); } if (origin instanceof Increment) { - assertEquals(((Increment)origin).getTimeRange(), ((Increment)clone).getTimeRange()); + assertEquals(((Increment) origin).getTimeRange(), ((Increment) clone).getTimeRange()); } } @@ -179,65 +163,54 @@ public void testRowIsImmutableOrNot() { // Test when row key is immutable Put putRowIsImmutable = new Put(rowKey, true); - assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made + assertTrue(rowKey == putRowIsImmutable.getRow()); // No local copy is made // Test when row key is not immutable Put putRowIsNotImmutable = new Put(rowKey, 1000L, false); - assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made + assertTrue(rowKey != putRowIsNotImmutable.getRow()); // A local copy is made } // HBASE-14882 @Test public void testAddImmutableToPut() throws IOException { - byte[] row = Bytes.toBytes("immutable-row"); - byte[] family = Bytes.toBytes("immutable-family"); + byte[] row = Bytes.toBytes("immutable-row"); + byte[] family = Bytes.toBytes("immutable-family"); byte[] qualifier0 = Bytes.toBytes("immutable-qualifier-0"); - byte[] value0 = Bytes.toBytes("immutable-value-0"); + byte[] value0 = Bytes.toBytes("immutable-value-0"); byte[] qualifier1 = Bytes.toBytes("immutable-qualifier-1"); - byte[] value1 = Bytes.toBytes("immutable-value-1"); - long ts1 = 5000L; + byte[] value1 = Bytes.toBytes("immutable-value-1"); + long ts1 = 5000L; // "true" indicates that the input row is immutable Put put = new Put(row, true); - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier0) - .setTimestamp(put.getTimestamp()) - .setType(Type.Put) - .setValue(value0) - .build()) - .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(row) - .setFamily(family) - .setQualifier(qualifier1) - .setTimestamp(ts1) - .setType(Type.Put) - .setValue(value1) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier0).setTimestamp(put.getTimestamp()).setType(Type.Put) + .setValue(value0).build()) + .add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(row).setFamily(family) + .setQualifier(qualifier1).setTimestamp(ts1).setType(Type.Put).setValue(value1).build()); // Verify the cell of family:qualifier0 Cell cell0 = put.get(family, qualifier0).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell0.getFamilyArray() == family); + assertTrue(cell0.getFamilyArray() == family); assertTrue(cell0.getQualifierArray() == qualifier0); - assertTrue(cell0.getValueArray() == value0); + assertTrue(cell0.getValueArray() == value0); // Verify timestamp - 
assertTrue(cell0.getTimestamp() == put.getTimestamp()); + assertTrue(cell0.getTimestamp() == put.getTimestamp()); // Verify the cell of family:qualifier1 Cell cell1 = put.get(family, qualifier1).get(0); // Verify no local copy is made for family, qualifier or value - assertTrue(cell1.getFamilyArray() == family); + assertTrue(cell1.getFamilyArray() == family); assertTrue(cell1.getQualifierArray() == qualifier1); - assertTrue(cell1.getValueArray() == value1); + assertTrue(cell1.getValueArray() == value1); // Verify timestamp - assertTrue(cell1.getTimestamp() == ts1); + assertTrue(cell1.getTimestamp() == ts1); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java index a56bb2c4d0f6..be4992ff05b6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestOperation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,19 +71,19 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Run tests that use the functionality of the Operation superclass for - * Puts, Gets, Deletes, Scans, and MultiPuts. + * Run tests that use the functionality of the Operation superclass for Puts, Gets, Deletes, Scans, + * and MultiPuts. */ -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestOperation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestOperation.class); - private static byte [] ROW = Bytes.toBytes("testRow"); - private static byte [] FAMILY = Bytes.toBytes("testFamily"); - private static byte [] QUALIFIER = Bytes.toBytes("testQualifier"); - private static byte [] VALUE = Bytes.toBytes("testValue"); + private static byte[] ROW = Bytes.toBytes("testRow"); + private static byte[] FAMILY = Bytes.toBytes("testFamily"); + private static byte[] QUALIFIER = Bytes.toBytes("testQualifier"); + private static byte[] VALUE = Bytes.toBytes("testValue"); private static Gson GSON = GsonUtil.createGson().create(); @@ -105,8 +105,8 @@ public class TestOperation { private static String COL_NAME_2 = "col2"; private static ColumnRangeFilter CR_FILTER = new ColumnRangeFilter(Bytes.toBytes(COL_NAME_1), true, Bytes.toBytes(COL_NAME_2), false); - private static String STR_CR_FILTER = CR_FILTER.getClass().getSimpleName() - + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; + private static String STR_CR_FILTER = + CR_FILTER.getClass().getSimpleName() + " [" + COL_NAME_1 + ", " + COL_NAME_2 + ")"; private static int COL_COUNT = 9; private static ColumnCountGetFilter CCG_FILTER = new ColumnCountGetFilter(COL_COUNT); @@ -115,14 +115,13 @@ public class TestOperation { private static int LIMIT = 3; private static int OFFSET = 4; private static ColumnPaginationFilter CP_FILTER = new ColumnPaginationFilter(LIMIT, OFFSET); - private static String STR_CP_FILTER = CP_FILTER.getClass().getSimpleName() - + " (" + LIMIT + ", " + OFFSET + ")"; + private static String STR_CP_FILTER = + CP_FILTER.getClass().getSimpleName() + " (" + LIMIT + ", " + OFFSET + ")"; private static String STOP_ROW_KEY = "stop"; private static InclusiveStopFilter IS_FILTER = new InclusiveStopFilter(Bytes.toBytes(STOP_ROW_KEY)); - private static String STR_IS_FILTER = - 
IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; + private static String STR_IS_FILTER = IS_FILTER.getClass().getSimpleName() + " " + STOP_ROW_KEY; private static String PREFIX = "prefix"; private static PrefixFilter PREFIX_FILTER = new PrefixFilter(Bytes.toBytes(PREFIX)); @@ -133,9 +132,9 @@ public class TestOperation { private static String STR_MCP_FILTER = MCP_FILTER.getClass().getSimpleName() + " (3/3): [0, 1, 2]"; - private static byte[][] L_PREFIXES = { - Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), - Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; + private static byte[][] L_PREFIXES = + { Bytes.toBytes("0"), Bytes.toBytes("1"), Bytes.toBytes("2"), Bytes.toBytes("3"), + Bytes.toBytes("4"), Bytes.toBytes("5"), Bytes.toBytes("6"), Bytes.toBytes("7") }; private static MultipleColumnPrefixFilter L_MCP_FILTER = new MultipleColumnPrefixFilter(L_PREFIXES); private static String STR_L_MCP_FILTER = @@ -165,10 +164,9 @@ public class TestOperation { private static BinaryComparator BC = new BinaryComparator(CMP_VALUE); private static DependentColumnFilter DC_FILTER = new DependentColumnFilter(FAMILY, QUALIFIER, true, CMP_OP, BC); - private static String STR_DC_FILTER = String.format( - "%s (%s, %s, %s, %s, %s)", DC_FILTER.getClass().getSimpleName(), - Bytes.toStringBinary(FAMILY), Bytes.toStringBinary(QUALIFIER), true, - CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); + private static String STR_DC_FILTER = String.format("%s (%s, %s, %s, %s, %s)", + DC_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), true, CMP_OP.name(), Bytes.toStringBinary(BC.getValue())); private static FamilyFilter FAMILY_FILTER = new FamilyFilter(CMP_OP, BC); private static String STR_FAMILY_FILTER = @@ -188,102 +186,93 @@ public class TestOperation { private static SingleColumnValueFilter SCV_FILTER = new SingleColumnValueFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCV_FILTER = String.format("%s (%s, %s, %s, %s)", - SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), - Bytes.toStringBinary(CMP_VALUE)); + SCV_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); private static SingleColumnValueExcludeFilter SCVE_FILTER = new SingleColumnValueExcludeFilter(FAMILY, QUALIFIER, CMP_OP, CMP_VALUE); private static String STR_SCVE_FILTER = String.format("%s (%s, %s, %s, %s)", - SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), - Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); - - private static FilterList AND_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ALL, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_AND_FILTER_LIST = String.format( - "%s AND (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList OR_FILTER_LIST = new FilterList( - Operator.MUST_PASS_ONE, Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); - private static String STR_OR_FILTER_LIST = String.format( - "%s OR (3/3): [%s, %s, %s]", AND_FILTER_LIST.getClass().getSimpleName(), - STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); - - private static FilterList L_FILTER_LIST = new FilterList( - Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER, 
COL_PRE_FILTER, - CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); - private static String STR_L_FILTER_LIST = String.format( - "%s AND (5/8): [%s, %s, %s, %s, %s, %s]", - L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, - STR_CR_FILTER, STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); - - private static Filter[] FILTERS = { - TS_FILTER, // TimestampsFilter - L_TS_FILTER, // TimestampsFilter - COL_PRE_FILTER, // ColumnPrefixFilter - CP_FILTER, // ColumnPaginationFilter - CR_FILTER, // ColumnRangeFilter - CCG_FILTER, // ColumnCountGetFilter - IS_FILTER, // InclusiveStopFilter - PREFIX_FILTER, // PrefixFilter - PAGE_FILTER, // PageFilter - SKIP_FILTER, // SkipFilter - WHILE_FILTER, // WhileMatchFilter - KEY_ONLY_FILTER, // KeyOnlyFilter - FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - MCP_FILTER, // MultipleColumnPrefixFilter - L_MCP_FILTER, // MultipleColumnPrefixFilter - DC_FILTER, // DependentColumnFilter - FAMILY_FILTER, // FamilyFilter - QUALIFIER_FILTER, // QualifierFilter - ROW_FILTER, // RowFilter - VALUE_FILTER, // ValueFilter - SCV_FILTER, // SingleColumnValueFilter - SCVE_FILTER, // SingleColumnValueExcludeFilter - AND_FILTER_LIST, // FilterList - OR_FILTER_LIST, // FilterList - L_FILTER_LIST, // FilterList + SCVE_FILTER.getClass().getSimpleName(), Bytes.toStringBinary(FAMILY), + Bytes.toStringBinary(QUALIFIER), CMP_OP.name(), Bytes.toStringBinary(CMP_VALUE)); + + private static FilterList AND_FILTER_LIST = new FilterList(Operator.MUST_PASS_ALL, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_AND_FILTER_LIST = String.format("%s AND (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList OR_FILTER_LIST = new FilterList(Operator.MUST_PASS_ONE, + Arrays.asList((Filter) TS_FILTER, L_TS_FILTER, CR_FILTER)); + private static String STR_OR_FILTER_LIST = String.format("%s OR (3/3): [%s, %s, %s]", + AND_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER); + + private static FilterList L_FILTER_LIST = new FilterList(Arrays.asList((Filter) TS_FILTER, + L_TS_FILTER, CR_FILTER, COL_PRE_FILTER, CCG_FILTER, CP_FILTER, PREFIX_FILTER, PAGE_FILTER)); + private static String STR_L_FILTER_LIST = String.format("%s AND (5/8): [%s, %s, %s, %s, %s, %s]", + L_FILTER_LIST.getClass().getSimpleName(), STR_TS_FILTER, STR_L_TS_FILTER, STR_CR_FILTER, + STR_COL_PRE_FILTER, STR_CCG_FILTER, STR_CP_FILTER); + + private static Filter[] FILTERS = { TS_FILTER, // TimestampsFilter + L_TS_FILTER, // TimestampsFilter + COL_PRE_FILTER, // ColumnPrefixFilter + CP_FILTER, // ColumnPaginationFilter + CR_FILTER, // ColumnRangeFilter + CCG_FILTER, // ColumnCountGetFilter + IS_FILTER, // InclusiveStopFilter + PREFIX_FILTER, // PrefixFilter + PAGE_FILTER, // PageFilter + SKIP_FILTER, // SkipFilter + WHILE_FILTER, // WhileMatchFilter + KEY_ONLY_FILTER, // KeyOnlyFilter + FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter + MCP_FILTER, // MultipleColumnPrefixFilter + L_MCP_FILTER, // MultipleColumnPrefixFilter + DC_FILTER, // DependentColumnFilter + FAMILY_FILTER, // FamilyFilter + QUALIFIER_FILTER, // QualifierFilter + ROW_FILTER, // RowFilter + VALUE_FILTER, // ValueFilter + SCV_FILTER, // SingleColumnValueFilter + SCVE_FILTER, // SingleColumnValueExcludeFilter + AND_FILTER_LIST, // FilterList + OR_FILTER_LIST, // FilterList + L_FILTER_LIST, // FilterList }; - private static String[] FILTERS_INFO = { - STR_TS_FILTER, // TimestampsFilter - 
STR_L_TS_FILTER, // TimestampsFilter - STR_COL_PRE_FILTER, // ColumnPrefixFilter - STR_CP_FILTER, // ColumnPaginationFilter - STR_CR_FILTER, // ColumnRangeFilter - STR_CCG_FILTER, // ColumnCountGetFilter - STR_IS_FILTER, // InclusiveStopFilter - STR_PREFIX_FILTER, // PrefixFilter - STR_PAGE_FILTER, // PageFilter - STR_SKIP_FILTER, // SkipFilter - STR_WHILE_FILTER, // WhileMatchFilter - STR_KEY_ONLY_FILTER, // KeyOnlyFilter - STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter - STR_MCP_FILTER, // MultipleColumnPrefixFilter - STR_L_MCP_FILTER, // MultipleColumnPrefixFilter - STR_DC_FILTER, // DependentColumnFilter - STR_FAMILY_FILTER, // FamilyFilter - STR_QUALIFIER_FILTER, // QualifierFilter - STR_ROW_FILTER, // RowFilter - STR_VALUE_FILTER, // ValueFilter - STR_SCV_FILTER, // SingleColumnValueFilter - STR_SCVE_FILTER, // SingleColumnValueExcludeFilter - STR_AND_FILTER_LIST, // FilterList - STR_OR_FILTER_LIST, // FilterList - STR_L_FILTER_LIST, // FilterList + private static String[] FILTERS_INFO = { STR_TS_FILTER, // TimestampsFilter + STR_L_TS_FILTER, // TimestampsFilter + STR_COL_PRE_FILTER, // ColumnPrefixFilter + STR_CP_FILTER, // ColumnPaginationFilter + STR_CR_FILTER, // ColumnRangeFilter + STR_CCG_FILTER, // ColumnCountGetFilter + STR_IS_FILTER, // InclusiveStopFilter + STR_PREFIX_FILTER, // PrefixFilter + STR_PAGE_FILTER, // PageFilter + STR_SKIP_FILTER, // SkipFilter + STR_WHILE_FILTER, // WhileMatchFilter + STR_KEY_ONLY_FILTER, // KeyOnlyFilter + STR_FIRST_KEY_ONLY_FILTER, // FirstKeyOnlyFilter + STR_MCP_FILTER, // MultipleColumnPrefixFilter + STR_L_MCP_FILTER, // MultipleColumnPrefixFilter + STR_DC_FILTER, // DependentColumnFilter + STR_FAMILY_FILTER, // FamilyFilter + STR_QUALIFIER_FILTER, // QualifierFilter + STR_ROW_FILTER, // RowFilter + STR_VALUE_FILTER, // ValueFilter + STR_SCV_FILTER, // SingleColumnValueFilter + STR_SCVE_FILTER, // SingleColumnValueExcludeFilter + STR_AND_FILTER_LIST, // FilterList + STR_OR_FILTER_LIST, // FilterList + STR_L_FILTER_LIST, // FilterList }; static { - assertEquals("The sizes of static arrays do not match: " - + "[FILTERS: %d <=> FILTERS_INFO: %d]", - FILTERS.length, FILTERS_INFO.length); + assertEquals("The sizes of static arrays do not match: " + "[FILTERS: %d <=> FILTERS_INFO: %d]", + FILTERS.length, FILTERS_INFO.length); } /** - * Test the client Operations' JSON encoding to ensure that produced JSON is - * parseable and that the details are present and not corrupted. - * + * Test the client Operations' JSON encoding to ensure that produced JSON is parseable and that + * the details are present and not corrupted. * @throws IOException if the JSON conversion fails */ @Test @@ -297,16 +286,14 @@ public void testOperationJSON() throws IOException { }.getType(); Map parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("startRow incorrect in Scan.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("startRow")); + assertEquals("startRow incorrect in Scan.toJSON()", Bytes.toStringBinary(ROW), + parsedJSON.get("startRow")); // check for the family and the qualifier. 
- List familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + List familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Scan.toJSON()", familyInfo); assertEquals("Qualifier absent in Scan.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Scan.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Scan.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Get Operation Get get = new Get(ROW); @@ -315,16 +302,13 @@ public void testOperationJSON() throws IOException { json = get.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row incorrect in Get.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row incorrect in Get.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Get.toJSON()", familyInfo); assertEquals("Qualifier absent in Get.toJSON()", 1, familyInfo.size()); - assertEquals("Qualifier incorrect in Get.toJSON()", - Bytes.toStringBinary(QUALIFIER), - familyInfo.get(0)); + assertEquals("Qualifier incorrect in Get.toJSON()", Bytes.toStringBinary(QUALIFIER), + familyInfo.get(0)); // produce a Put operation Put put = new Put(ROW); @@ -333,17 +317,14 @@ public void testOperationJSON() throws IOException { json = put.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Put.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Put.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. - familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Put.toJSON()", familyInfo); assertEquals("KeyValue absent in Put.toJSON()", 1, familyInfo.size()); Map kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Put.toJSON()", - Bytes.toStringBinary(QUALIFIER), - kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Put.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); assertEquals("Value length incorrect in Put.toJSON()", VALUE.length, ((Number) kvMap.get("vlen")).intValue()); @@ -354,16 +335,14 @@ public void testOperationJSON() throws IOException { json = delete.toJSON(); parsedJSON = GSON.fromJson(json, typeOfHashMap); // check for the row - assertEquals("row absent in Delete.toJSON()", - Bytes.toStringBinary(ROW), parsedJSON.get("row")); + assertEquals("row absent in Delete.toJSON()", Bytes.toStringBinary(ROW), parsedJSON.get("row")); // check for the family and the qualifier. 
- familyInfo = (List) ((Map) parsedJSON.get("families")).get( - Bytes.toStringBinary(FAMILY)); + familyInfo = (List) ((Map) parsedJSON.get("families")).get(Bytes.toStringBinary(FAMILY)); assertNotNull("Family absent in Delete.toJSON()", familyInfo); assertEquals("KeyValue absent in Delete.toJSON()", 1, familyInfo.size()); kvMap = (Map) familyInfo.get(0); - assertEquals("Qualifier incorrect in Delete.toJSON()", - Bytes.toStringBinary(QUALIFIER), kvMap.get("qualifier")); + assertEquals("Qualifier incorrect in Delete.toJSON()", Bytes.toStringBinary(QUALIFIER), + kvMap.get("qualifier")); } @Test @@ -386,7 +365,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2013L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -395,7 +374,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(HConstants.LATEST_TIMESTAMP, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -405,7 +384,7 @@ public void testPutCreationWithByteBuffer() { c = p.get(FAMILY, QUALIFIER); Assert.assertEquals(1, c.size()); Assert.assertEquals(2001L, c.get(0).getTimestamp()); - Assert.assertArrayEquals(new byte[]{}, CellUtil.cloneValue(c.get(0))); + Assert.assertArrayEquals(new byte[] {}, CellUtil.cloneValue(c.get(0))); Assert.assertArrayEquals(ROW, CellUtil.cloneRow(c.get(0))); Assert.assertEquals(1970L, p.getTimestamp()); Assert.assertEquals(0, CellComparatorImpl.COMPARATOR.compare(c.get(0), new KeyValue(c.get(0)))); @@ -414,29 +393,17 @@ public void testPutCreationWithByteBuffer() { @Test @SuppressWarnings("rawtypes") public void testOperationSubClassMethodsAreBuilderStyle() { - /* All Operation subclasses should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * Scan scan = new Scan() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * All Operation subclasses should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: Scan scan = new Scan() .setFoo(foo) .setBar(bar) + * .setBuz(buz) This test ensures that all methods starting with "set" returns the declaring + * object */ // TODO: We should ensure all subclasses of Operation is checked. 
- Class[] classes = new Class[] { - Operation.class, - OperationWithAttributes.class, - Mutation.class, - Query.class, - Delete.class, - Increment.class, - Append.class, - Put.class, - Get.class, - Scan.class}; + Class[] classes = + new Class[] { Operation.class, OperationWithAttributes.class, Mutation.class, Query.class, + Delete.class, Increment.class, Append.class, Put.class, Get.class, Scan.class }; BuilderStyleTest.assertClassesAreBuilderStyle(classes); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java index 01740e98461f..6debb1b9e729 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestProcedureFuture.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos.GetProcedureResultResponse; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestProcedureFuture { @ClassRule @@ -71,12 +71,11 @@ public boolean wasConvertResultCalled() { } @Override - protected GetProcedureResultResponse getProcedureResult( - final GetProcedureResultRequest request) throws IOException { + protected GetProcedureResultResponse getProcedureResult(final GetProcedureResultRequest request) + throws IOException { getProcedureResultCalled = true; return GetProcedureResultResponse.newBuilder() - .setState(GetProcedureResultResponse.State.FINISHED) - .build(); + .setState(GetProcedureResultResponse.State.FINISHED).build(); } @Override @@ -86,8 +85,7 @@ protected Void convertResult(final GetProcedureResultResponse response) throws I } @Override - protected Void waitOperationResult(final long deadlineTs) - throws IOException, TimeoutException { + protected Void waitOperationResult(final long deadlineTs) throws IOException, TimeoutException { waitOperationResultCalled = true; return null; } @@ -101,8 +99,7 @@ protected Void postOperationResult(final Void result, final long deadlineTs) } /** - * When a master return a result with procId, - * we are skipping the waitOperationResult() call, + * When a master return a result with procId, we are skipping the waitOperationResult() call, * since we are getting the procedure result. */ @Test @@ -126,13 +123,13 @@ public void testWithProcIdAndSpinning() throws Exception { HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); TestFuture f = new TestFuture(admin, 100L) { @Override - protected GetProcedureResultResponse getProcedureResult( - final GetProcedureResultRequest request) throws IOException { + protected GetProcedureResultResponse + getProcedureResult(final GetProcedureResultRequest request) throws IOException { boolean done = spinCount.incrementAndGet() >= 10; return GetProcedureResultResponse.newBuilder() - .setState(done ? GetProcedureResultResponse.State.FINISHED : - GetProcedureResultResponse.State.RUNNING) - .build(); + .setState(done ? 
GetProcedureResultResponse.State.FINISHED + : GetProcedureResultResponse.State.RUNNING) + .build(); } }; f.get(1, TimeUnit.MINUTES); @@ -144,8 +141,7 @@ protected GetProcedureResultResponse getProcedureResult( } /** - * When a master return a result without procId, - * we are skipping the getProcedureResult() call. + * When a master return a result without procId, we are skipping the getProcedureResult() call. */ @Test public void testWithoutProcId() throws Exception { @@ -160,20 +156,19 @@ public void testWithoutProcId() throws Exception { } /** - * When a new client with procedure support tries to ask an old-master without proc-support - * the procedure result we get a DoNotRetryIOException (which is an UnsupportedOperationException) - * The future should trap that and fallback to the waitOperationResult(). - * - * This happens when the operation calls happens on a "new master" but while we are waiting - * the operation to be completed, we failover on an "old master". + * When a new client with procedure support tries to ask an old-master without proc-support the + * procedure result we get a DoNotRetryIOException (which is an UnsupportedOperationException) The + * future should trap that and fallback to the waitOperationResult(). This happens when the + * operation calls happens on a "new master" but while we are waiting the operation to be + * completed, we failover on an "old master". */ @Test public void testOnServerWithNoProcedureSupport() throws Exception { HBaseAdmin admin = Mockito.mock(HBaseAdmin.class); TestFuture f = new TestFuture(admin, 100L) { @Override - protected GetProcedureResultResponse getProcedureResult( - final GetProcedureResultRequest request) throws IOException { + protected GetProcedureResultResponse + getProcedureResult(final GetProcedureResultRequest request) throws IOException { super.getProcedureResult(request); throw new DoNotRetryIOException(new UnsupportedOperationException("getProcedureResult")); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java index ef9d4c96d282..b1f560f5509f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutDotHas.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,10 +27,9 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) /** - * Addresses HBASE-6047 - * We test put.has call with all of its polymorphic magic + * Addresses HBASE-6047 We test put.has call with all of its polymorphic magic */ public class TestPutDotHas { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java index 8572c0b47a1a..f57145cdb1b2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestPutWriteToWal.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.client; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java index 3b66f7eb2e60..f7c36d00c28d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ public class TestRegionInfoBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionInfoBuilder.class); + HBaseClassTestRule.forClass(TestRegionInfoBuilder.class); @Rule public TableNameTestRule name = new TableNameTestRule(); @@ -114,7 +114,7 @@ public void testCreateRegionInfoName() throws Exception { public void testContainsRange() { TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(name.getTableName()).build(); RegionInfo ri = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("g")).build(); // Single row range at start of region assertTrue(ri.containsRange(Bytes.toBytes("a"), Bytes.toBytes("a"))); // Fully contained range @@ -175,9 +175,9 @@ public void testContainsRangeForMetaTable() { public void testLastRegionCompare() { TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(name.getTableName()).build(); RegionInfo rip = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(new byte[0]).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(new byte[0]).build(); RegionInfo ric = RegionInfoBuilder.newBuilder(tableDesc.getTableName()) - .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); + .setStartKey(Bytes.toBytes("a")).setEndKey(Bytes.toBytes("b")).build(); assertTrue(RegionInfo.COMPARATOR.compare(rip, ric) > 0); } @@ -191,9 +191,9 @@ public void testComparator() { final TableName tableName = name.getTableName(); byte[] empty = new byte[0]; RegionInfo older = RegionInfoBuilder.newBuilder(tableName).setStartKey(empty).setEndKey(empty) - .setSplit(false).setRegionId(0L).build(); + .setSplit(false).setRegionId(0L).build(); RegionInfo newer = RegionInfoBuilder.newBuilder(tableName).setStartKey(empty).setEndKey(empty) - .setSplit(false).setRegionId(1L).build(); + .setSplit(false).setRegionId(1L).build(); assertTrue(RegionInfo.COMPARATOR.compare(older, newer) < 0); 
assertTrue(RegionInfo.COMPARATOR.compare(newer, older) > 0); assertTrue(RegionInfo.COMPARATOR.compare(older, older) == 0); @@ -259,7 +259,7 @@ public void testParseName() throws IOException { @Test public void testConvert() { final TableName tableName = - TableName.valueOf("ns1:" + name.getTableName().getQualifierAsString()); + TableName.valueOf("ns1:" + name.getTableName().getQualifierAsString()); byte[] startKey = Bytes.toBytes("startKey"); byte[] endKey = Bytes.toBytes("endKey"); boolean split = false; @@ -267,7 +267,7 @@ public void testConvert() { int replicaId = 42; RegionInfo ri = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).setEndKey(endKey) - .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build(); + .setSplit(split).setRegionId(regionId).setReplicaId(replicaId).build(); // convert two times, compare RegionInfo convertedRi = ProtobufUtil.toRegionInfo(ProtobufUtil.toRegionInfo(ri)); @@ -276,16 +276,16 @@ public void testConvert() { // test convert RegionInfo without replicaId HBaseProtos.RegionInfo info = HBaseProtos.RegionInfo.newBuilder() - .setTableName(HBaseProtos.TableName.newBuilder() - .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())) - .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build()) - .setStartKey(UnsafeByteOperations.unsafeWrap(startKey)) - .setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId) - .build(); + .setTableName(HBaseProtos.TableName.newBuilder() + .setQualifier(UnsafeByteOperations.unsafeWrap(tableName.getQualifier())) + .setNamespace(UnsafeByteOperations.unsafeWrap(tableName.getNamespace())).build()) + .setStartKey(UnsafeByteOperations.unsafeWrap(startKey)) + .setEndKey(UnsafeByteOperations.unsafeWrap(endKey)).setSplit(split).setRegionId(regionId) + .build(); convertedRi = ProtobufUtil.toRegionInfo(info); RegionInfo expectedRi = RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey) - .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build(); + .setEndKey(endKey).setSplit(split).setRegionId(regionId).setReplicaId(0).build(); assertEquals(expectedRi, convertedRi); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java index f72fb66ef734..8a6478d1a381 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionInfoDisplay.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,36 +35,34 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestRegionInfoDisplay { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRegionInfoDisplay.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); @Test public void testRegionDetailsForDisplay() throws IOException { - byte[] startKey = new byte[] {0x01, 0x01, 0x02, 0x03}; - byte[] endKey = new byte[] {0x01, 0x01, 0x02, 0x04}; + byte[] startKey = new byte[] { 0x01, 0x01, 0x02, 0x03 }; + byte[] endKey = new byte[] { 0x01, 0x01, 0x02, 0x04 }; Configuration conf = new Configuration(); conf.setBoolean("hbase.display.keys", false); RegionInfo ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey).setEndKey(endKey).build(); + .setStartKey(startKey).setEndKey(endKey).build(); checkEquality(ri, conf); // check HRIs with non-default replicaId - ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setStartKey(startKey) - .setEndKey(endKey) - .setSplit(false) - .setRegionId(EnvironmentEdgeManager.currentTime()) - .setReplicaId(1).build(); + ri = RegionInfoBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setStartKey(startKey) + .setEndKey(endKey).setSplit(false).setRegionId(EnvironmentEdgeManager.currentTime()) + .setReplicaId(1).build(); checkEquality(ri, conf); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_END_KEY, - RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); + RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(RegionInfoDisplay.HIDDEN_START_KEY, - RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); + RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); RegionState state = RegionState.createForTesting(convert(ri), RegionState.State.OPEN); String descriptiveNameForDisplay = @@ -76,25 +74,22 @@ public void testRegionDetailsForDisplay() throws IOException { Assert.assertArrayEquals(endKey, RegionInfoDisplay.getEndKeyForDisplay(ri, conf)); Assert.assertArrayEquals(startKey, RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); Assert.assertEquals(originalDescriptive, - RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); + RegionInfoDisplay.getDescriptiveNameFromRegionStateForDisplay(state, conf)); } private void checkDescriptiveNameEquality(String descriptiveNameForDisplay, String origDesc, byte[] startKey) { // except for the "hidden-start-key" substring everything else should exactly match - String firstPart = descriptiveNameForDisplay.substring(0, - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); - String secondPart = descriptiveNameForDisplay.substring( - descriptiveNameForDisplay.indexOf( - new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + - RegionInfoDisplay.HIDDEN_START_KEY.length); + String firstPart = descriptiveNameForDisplay.substring(0, descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8))); + String secondPart = descriptiveNameForDisplay.substring(descriptiveNameForDisplay + .indexOf(new String(RegionInfoDisplay.HIDDEN_START_KEY, StandardCharsets.UTF_8)) + + RegionInfoDisplay.HIDDEN_START_KEY.length); String firstPartOrig = 
origDesc.substring(0, origDesc.indexOf(Bytes.toStringBinary(startKey))); String secondPartOrig = origDesc.substring( - origDesc.indexOf(Bytes.toStringBinary(startKey)) + - Bytes.toStringBinary(startKey).length()); - assert(firstPart.equals(firstPartOrig)); - assert(secondPart.equals(secondPartOrig)); + origDesc.indexOf(Bytes.toStringBinary(startKey)) + Bytes.toStringBinary(startKey).length()); + assert (firstPart.equals(firstPartOrig)); + assert (secondPart.equals(secondPartOrig)); } private void checkEquality(RegionInfo ri, Configuration conf) throws IOException { @@ -103,18 +98,18 @@ private void checkEquality(RegionInfo ri, Configuration conf) throws IOException byte[][] modifiedRegionNameParts = RegionInfo.parseRegionName(modifiedRegionName); byte[][] regionNameParts = RegionInfo.parseRegionName(ri.getRegionName()); - //same number of parts - assert(modifiedRegionNameParts.length == regionNameParts.length); + // same number of parts + assert (modifiedRegionNameParts.length == regionNameParts.length); for (int i = 0; i < regionNameParts.length; i++) { // all parts should match except for [1] where in the modified one, // we should have "hidden_start_key" if (i != 1) { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertArrayEquals(regionNameParts[i], modifiedRegionNameParts[i]); } else { - System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + - Bytes.toString(modifiedRegionNameParts[i])); + System.out.println("" + i + " " + Bytes.toString(regionNameParts[i]) + " " + + Bytes.toString(modifiedRegionNameParts[i])); Assert.assertNotEquals(regionNameParts[i], modifiedRegionNameParts[i]); Assert.assertArrayEquals(modifiedRegionNameParts[1], RegionInfoDisplay.getStartKeyForDisplay(ri, conf)); @@ -123,8 +118,8 @@ private void checkEquality(RegionInfo ri, Configuration conf) throws IOException } private HRegionInfo convert(RegionInfo ri) { - HRegionInfo hri =new HRegionInfo(ri.getTable(), ri.getStartKey(), ri.getEndKey(), - ri.isSplit(), ri.getRegionId()); + HRegionInfo hri = new HRegionInfo(ri.getTable(), ri.getStartKey(), ri.getEndKey(), ri.isSplit(), + ri.getRegionId()); hri.setOffline(ri.isOffline()); return hri; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java index a0415da28d05..e435eb4de53c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegionLocatorTracing.java @@ -26,6 +26,7 @@ import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.containsInAnyOrder; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.trace.data.SpanData; @@ -43,13 +44,14 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; @Category({ ClientTests.class, MediumTests.class }) public class TestRegionLocatorTracing extends TestTracingBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionLocatorTracing.class); + 
HBaseClassTestRule.forClass(TestRegionLocatorTracing.class); ConnectionImplementation conn; @@ -69,13 +71,11 @@ public void tearDown() throws IOException { public void testGetRegionLocation() throws IOException { conn.getRegionLocator(TableName.META_TABLE_NAME).getRegionLocation(HConstants.EMPTY_START_ROW); SpanData span = waitSpan("HRegionLocator.getRegionLocation"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( - containsEntryWithStringValuesOf("db.hbase.regions", + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME), + hasAttributes(containsEntryWithStringValuesOf("db.hbase.regions", META_REGION_LOCATION.getDefaultRegionLocation().getRegion().getRegionNameAsString())))); } @@ -84,16 +84,12 @@ public void testGetRegionLocations() throws IOException { conn.getRegionLocator(TableName.META_TABLE_NAME).getRegionLocations(HConstants.EMPTY_START_ROW); SpanData span = waitSpan("HRegionLocator.getRegionLocations"); // TODO: Use a value of `META_REGION_LOCATION` that contains multiple region locations. - String[] expectedRegions = Arrays.stream(META_REGION_LOCATION.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(META_REGION_LOCATION.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } @@ -102,16 +98,12 @@ public void testGetAllRegionLocations() throws IOException { conn.getRegionLocator(TableName.META_TABLE_NAME).getAllRegionLocations(); SpanData span = waitSpan("HRegionLocator.getAllRegionLocations"); // TODO: Use a value of `META_REGION_LOCATION` that contains multiple region locations. 
- String[] expectedRegions = Arrays.stream(META_REGION_LOCATION.getRegionLocations()) - .map(HRegionLocation::getRegion) - .map(RegionInfo::getRegionNameAsString) - .toArray(String[]::new); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), + String[] expectedRegions = + Arrays.stream(META_REGION_LOCATION.getRegionLocations()).map(HRegionLocation::getRegion) + .map(RegionInfo::getRegionNameAsString).toArray(String[]::new); + assertThat(span, allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME), - hasAttributes( + buildTableAttributesMatcher(TableName.META_TABLE_NAME), hasAttributes( containsEntryWithStringValuesOf("db.hbase.regions", containsInAnyOrder(expectedRegions))))); } @@ -119,11 +111,10 @@ public void testGetAllRegionLocations() throws IOException { public void testClearRegionLocationCache() throws IOException { conn.getRegionLocator(TableName.META_TABLE_NAME).clearRegionLocationCache(); SpanData span = waitSpan("HRegionLocator.clearRegionLocationCache"); - assertThat(span, allOf( - hasStatusWithCode(StatusCode.OK), - hasKind(SpanKind.CLIENT), - buildConnectionAttributesMatcher(conn), - buildTableAttributesMatcher(TableName.META_TABLE_NAME))); + assertThat(span, + allOf(hasStatusWithCode(StatusCode.OK), hasKind(SpanKind.CLIENT), + buildConnectionAttributesMatcher(conn), + buildTableAttributesMatcher(TableName.META_TABLE_NAME))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java index 3d6fe1563b8c..a20b4c0aeaa1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRegistryEndpointsRefresher.java @@ -44,14 +44,14 @@ public class TestRegistryEndpointsRefresher { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegistryEndpointsRefresher.class); + HBaseClassTestRule.forClass(TestRegistryEndpointsRefresher.class); private static final String INITIAL_DELAY_SECS_CONFIG_NAME = - "hbase.test.registry.initial.delay.secs"; + "hbase.test.registry.initial.delay.secs"; private static final String INTERVAL_SECS_CONFIG_NAME = - "hbase.test.registry.refresh.interval.secs"; + "hbase.test.registry.refresh.interval.secs"; private static final String MIN_INTERVAL_SECS_CONFIG_NAME = - "hbase.test.registry.refresh.min.interval.secs"; + "hbase.test.registry.refresh.min.interval.secs"; private Configuration conf; private RegistryEndpointsRefresher refresher; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java index 5b591030c966..ded41dd58a70 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestResultStatsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,14 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestResultStatsUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestResultStatsUtil.class); + HBaseClassTestRule.forClass(TestResultStatsUtil.class); - private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, - 10,90); - private static final byte[] regionName = {80}; + private static final RegionLoadStats regionLoadStats = new RegionLoadStats(100, 10, 90); + private static final byte[] regionName = { 80 }; private static final ServerName server = ServerName.parseServerName("3.1.yg.n,50,1"); @Test @@ -51,12 +50,12 @@ public void testUpdateStats() { // Check that the tracker was updated as expected ServerStatistics stats = serverStatisticTracker.getStats(server); - assertEquals(regionLoadStats.memstoreLoad, stats.getStatsForRegion(regionName) - .getMemStoreLoadPercent()); - assertEquals(regionLoadStats.compactionPressure, stats.getStatsForRegion(regionName) - .getCompactionPressure()); - assertEquals(regionLoadStats.heapOccupancy, stats.getStatsForRegion(regionName) - .getHeapOccupancyPercent()); + assertEquals(regionLoadStats.memstoreLoad, + stats.getStatsForRegion(regionName).getMemStoreLoadPercent()); + assertEquals(regionLoadStats.compactionPressure, + stats.getStatsForRegion(regionName).getCompactionPressure()); + assertEquals(regionLoadStats.heapOccupancy, + stats.getStatsForRegion(regionName).getHeapOccupancyPercent()); } @Test diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java index 7b584e948610..463036cf8903 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRetriesExhaustedWithDetailsException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,14 +31,15 @@ import org.junit.rules.TestName; import org.mockito.Mockito; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRetriesExhaustedWithDetailsException { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRetriesExhaustedWithDetailsException.class); - @Rule public TestName name = new TestName(); + @Rule + public TestName name = new TestName(); /** * Assert that a RetriesExhaustedException that has RegionTooBusyException outputs region name. diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java index 67976b8233ad..04c898e946fa 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestReversedScannerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import static org.mockito.Mockito.when; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -81,11 +80,10 @@ public void setUp() throws Exception { @Test public void testPrepareAlwaysUsesCache() throws Exception { - when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)) - .thenReturn(regionLocations); + when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)).thenReturn(regionLocations); ReversedScannerCallable callable = - new ReversedScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); + new ReversedScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); callable.prepare(false); callable.prepare(true); @@ -105,7 +103,7 @@ public void testHandleDisabledTable() throws IOException { @Test public void testUpdateSearchKeyCacheLocation() throws IOException { byte[] regionName = RegionInfo.createRegionName(TABLE_NAME, - ConnectionUtils.createCloseRowBefore(ConnectionUtils.MAX_BYTE_ARRAY), "123", false); + ConnectionUtils.createCloseRowBefore(ConnectionUtils.MAX_BYTE_ARRAY), "123", false); HRegionInfo mockRegionInfo = mock(HRegionInfo.class); when(mockRegionInfo.containsRow(ConnectionUtils.MAX_BYTE_ARRAY)).thenReturn(true); when(mockRegionInfo.getEndKey()).thenReturn(HConstants.EMPTY_END_ROW); @@ -125,7 +123,7 @@ public void testUpdateSearchKeyCacheLocation() throws IOException { callable.throwable(testThrowable, true); - verify(connection).updateCachedLocations(TABLE_NAME, regionName, - ConnectionUtils.MAX_BYTE_ARRAY, testThrowable, SERVERNAME); + verify(connection).updateCachedLocations(TABLE_NAME, regionName, ConnectionUtils.MAX_BYTE_ARRAY, + testThrowable, SERVERNAME); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java index 64983089ae06..eb7cde981a2c 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRowComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,15 +35,15 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestRowComparator { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRowComparator.class); - private static final List DEFAULT_ROWS = IntStream.range(1, 9) - .mapToObj(String::valueOf).map(Bytes::toBytes).collect(Collectors.toList()); + private static final List DEFAULT_ROWS = IntStream.range(1, 9).mapToObj(String::valueOf) + .map(Bytes::toBytes).collect(Collectors.toList()); @Test public void testPut() { @@ -71,8 +71,7 @@ public void testGet() { } private static void test(Function f) { - List rows = new ArrayList(DEFAULT_ROWS.stream() - .map(f).collect(Collectors.toList())); + List rows = new ArrayList(DEFAULT_ROWS.stream().map(f).collect(Collectors.toList())); do { Collections.shuffle(rows); } while (needShuffle(rows)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java index e534ab094d9d..888ca011fa15 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestRpcBasedRegistryHedgedReads.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,22 +64,22 @@ public class TestRpcBasedRegistryHedgedReads { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRpcBasedRegistryHedgedReads.class); + HBaseClassTestRule.forClass(TestRpcBasedRegistryHedgedReads.class); private static final Logger LOG = LoggerFactory.getLogger(TestRpcBasedRegistryHedgedReads.class); private static final String HEDGED_REQS_FANOUT_CONFIG_NAME = "hbase.test.hedged.reqs.fanout"; private static final String INITIAL_DELAY_SECS_CONFIG_NAME = - "hbase.test.refresh.initial.delay.secs"; + "hbase.test.refresh.initial.delay.secs"; private static final String REFRESH_INTERVAL_SECS_CONFIG_NAME = - "hbase.test.refresh.interval.secs"; + "hbase.test.refresh.interval.secs"; private static final String MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME = - "hbase.test.min.refresh.interval.secs"; + "hbase.test.min.refresh.interval.secs"; private static final HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility(); private static final ExecutorService EXECUTOR = - Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true).build()); + Executors.newCachedThreadPool(new ThreadFactoryBuilder().setDaemon(true).build()); private static Set BOOTSTRAP_NODES; @@ -90,12 +90,12 @@ public class TestRpcBasedRegistryHedgedReads { private static volatile Set GOOD_RESP_INDEXS; private static GetClusterIdResponse RESP = - GetClusterIdResponse.newBuilder().setClusterId("id").build(); + GetClusterIdResponse.newBuilder().setClusterId("id").build(); public static final class RpcClientImpl implements RpcClient { public RpcClientImpl(Configuration configuration, String clusterId, SocketAddress localAddress, - MetricsConnection metrics) { + MetricsConnection metrics) { } @Override @@ -130,7 +130,7 @@ public static final class RpcChannelImpl implements 
RpcChannel { @Override public void callMethod(MethodDescriptor method, RpcController controller, Message request, - Message responsePrototype, RpcCallback done) { + Message responsePrototype, RpcCallback done) { if (!method.getName().equals("GetClusterId")) { // On RPC failures, MasterRegistry internally runs getMasters() RPC to keep the master list // fresh. We do not want to intercept those RPCs here and double count. @@ -155,8 +155,8 @@ private AbstractRpcBasedConnectionRegistry createRegistry(int hedged) throws IOE Configuration conf = UTIL.getConfiguration(); conf.setInt(HEDGED_REQS_FANOUT_CONFIG_NAME, hedged); return new AbstractRpcBasedConnectionRegistry(conf, HEDGED_REQS_FANOUT_CONFIG_NAME, - INITIAL_DELAY_SECS_CONFIG_NAME, REFRESH_INTERVAL_SECS_CONFIG_NAME, - MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME) { + INITIAL_DELAY_SECS_CONFIG_NAME, REFRESH_INTERVAL_SECS_CONFIG_NAME, + MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME) { @Override protected Set getBootstrapNodes(Configuration conf) throws IOException { @@ -168,7 +168,8 @@ protected CompletableFuture> fetchEndpoints() { return CompletableFuture.completedFuture(BOOTSTRAP_NODES); } - @Override public String getConnectionString() { + @Override + public String getConnectionString() { return "unimplemented"; } }; @@ -184,8 +185,8 @@ public static void setUpBeforeClass() { conf.setLong(REFRESH_INTERVAL_SECS_CONFIG_NAME, Integer.MAX_VALUE); conf.setLong(MIN_REFRESH_INTERVAL_SECS_CONFIG_NAME, Integer.MAX_VALUE - 1); BOOTSTRAP_NODES = IntStream.range(0, 10) - .mapToObj(i -> ServerName.valueOf("localhost", (10000 + 100 * i), ServerName.NON_STARTCODE)) - .collect(Collectors.toSet()); + .mapToObj(i -> ServerName.valueOf("localhost", (10000 + 100 * i), ServerName.NON_STARTCODE)) + .collect(Collectors.toSet()); } @AfterClass @@ -229,7 +230,7 @@ public void testAllFailHedged3() throws IOException { @Test public void testFirstSucceededNoHedge() throws IOException { GOOD_RESP_INDEXS = - IntStream.range(0, 10).mapToObj(Integer::valueOf).collect(Collectors.toSet()); + IntStream.range(0, 10).mapToObj(Integer::valueOf).collect(Collectors.toSet()); // will be set to 1 try (AbstractRpcBasedConnectionRegistry registry = createRegistry(0)) { String clusterId = logIfError(registry.getClusterId()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java index fe538054d9f0..faf59490ddd8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.Set; - import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HConstants; @@ -40,15 +39,15 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; // TODO: cover more test cases -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestScan { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestScan.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestScan.class); @Test public void testAttributesSerialization() throws IOException { @@ -71,22 +70,14 @@ public void testAttributesSerialization() throws IOException { @Test public void testGetToScan() throws Exception { Get get = new Get(Bytes.toBytes(1)); - get.setCacheBlocks(true) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("get") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultsPerColumnFamily(1000) - .setMaxVersions(9999) - .setRowOffsetPerColumnFamily(5) - .setTimeRange(0, 13) - .setAttribute("att_v0", Bytes.toBytes("att_v0")) - .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123) - .setReplicaId(3) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAuthorizations(new Authorizations("test_label")) - .setPriority(3); + get.setCacheBlocks(true).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("get").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLoadColumnFamiliesOnDemand(false).setMaxResultsPerColumnFamily(1000) + .setMaxVersions(9999).setRowOffsetPerColumnFamily(5).setTimeRange(0, 13) + .setAttribute("att_v0", Bytes.toBytes("att_v0")) + .setColumnFamilyTimeRange(Bytes.toBytes("cf"), 0, 123).setReplicaId(3) + .setACL("test_user", new Permission(Permission.Action.READ)) + .setAuthorizations(new Authorizations("test_label")).setPriority(3); Scan scan = new Scan(get); assertEquals(get.getCacheBlocks(), scan.getCacheBlocks()); @@ -95,7 +86,7 @@ public void testGetToScan() throws Exception { assertEquals(get.getId(), scan.getId()); assertEquals(get.getIsolationLevel(), scan.getIsolationLevel()); assertEquals(get.getLoadColumnFamiliesOnDemandValue(), - scan.getLoadColumnFamiliesOnDemandValue()); + scan.getLoadColumnFamiliesOnDemandValue()); assertEquals(get.getMaxResultsPerColumnFamily(), scan.getMaxResultsPerColumnFamily()); assertEquals(get.getMaxVersions(), scan.getMaxVersions()); assertEquals(get.getRowOffsetPerColumnFamily(), scan.getRowOffsetPerColumnFamily()); @@ -103,9 +94,9 @@ public void testGetToScan() throws Exception { assertEquals(get.getTimeRange().getMax(), scan.getTimeRange().getMax()); assertTrue(Bytes.equals(get.getAttribute("att_v0"), scan.getAttribute("att_v0"))); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin(), - scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMin()); assertEquals(get.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax(), - 
scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); + scan.getColumnFamilyTimeRange().get(Bytes.toBytes("cf")).getMax()); assertEquals(get.getReplicaId(), scan.getReplicaId()); assertEquals(get.getACL(), scan.getACL()); assertEquals(get.getAuthorizations().getLabels(), scan.getAuthorizations().getLabels()); @@ -126,22 +117,22 @@ public void testScanAttributes() { scan.setAttribute("attribute1", Bytes.toBytes("value1")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value1"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value1"), scan.getAttributesMap().get("attribute1"))); // overriding attribute value scan.setAttribute("attribute1", Bytes.toBytes("value12")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), scan.getAttribute("attribute1"))); Assert.assertEquals(1, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value12"), - scan.getAttributesMap().get("attribute1"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value12"), scan.getAttributesMap().get("attribute1"))); // adding another attribute scan.setAttribute("attribute2", Bytes.toBytes("value2")); Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), scan.getAttribute("attribute2"))); Assert.assertEquals(2, scan.getAttributesMap().size()); - Assert.assertTrue(Arrays.equals(Bytes.toBytes("value2"), - scan.getAttributesMap().get("attribute2"))); + Assert.assertTrue( + Arrays.equals(Bytes.toBytes("value2"), scan.getAttributesMap().get("attribute2"))); // removing attribute scan.setAttribute("attribute2", null); @@ -199,7 +190,7 @@ public void testSetStartRowAndSetStopRow() { scan.setStartRow(new byte[1]); scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.setStartRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -210,7 +201,7 @@ public void testSetStartRowAndSetStopRow() { scan.setStopRow(new byte[1]); scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH]); try { - scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH+1]); + scan.setStopRow(new byte[HConstants.MAX_ROW_LENGTH + 1]); fail("should've thrown exception"); } catch (IllegalArgumentException iae) { } catch (Exception e) { @@ -223,36 +214,17 @@ public void testScanCopyConstructor() throws Exception { Scan scan = new Scan(); scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q")) - .setACL("test_user", new Permission(Permission.Action.READ)) - .setAllowPartialResults(true) - .setAsyncPrefetch(false) - .setAttribute("test_key", Bytes.toBytes("test_value")) - .setAuthorizations(new Authorizations("test_label")) - .setBatch(10) - .setCacheBlocks(false) - .setCaching(10) - .setConsistency(Consistency.TIMELINE) - .setFilter(new FilterList()) - .setId("scan_copy_constructor") - .setIsolationLevel(IsolationLevel.READ_COMMITTED) - .setLimit(100) - .setLoadColumnFamiliesOnDemand(false) - .setMaxResultSize(100) - .setMaxResultsPerColumnFamily(1000) - .readVersions(9999) - .setMvccReadPoint(5) - .setNeedCursorResult(true) - .setPriority(1) - .setRaw(true) - .setReplicaId(3) - .setReversed(true) - .setRowOffsetPerColumnFamily(5) - .setStartStopRowForPrefixScan(Bytes.toBytes("row_")) - .setScanMetricsEnabled(true) - .setSmall(true) - 
.setReadType(ReadType.STREAM) - .withStartRow(Bytes.toBytes("row_1")) - .withStopRow(Bytes.toBytes("row_2")) + .setACL("test_user", new Permission(Permission.Action.READ)).setAllowPartialResults(true) + .setAsyncPrefetch(false).setAttribute("test_key", Bytes.toBytes("test_value")) + .setAuthorizations(new Authorizations("test_label")).setBatch(10).setCacheBlocks(false) + .setCaching(10).setConsistency(Consistency.TIMELINE).setFilter(new FilterList()) + .setId("scan_copy_constructor").setIsolationLevel(IsolationLevel.READ_COMMITTED) + .setLimit(100).setLoadColumnFamiliesOnDemand(false).setMaxResultSize(100) + .setMaxResultsPerColumnFamily(1000).readVersions(9999).setMvccReadPoint(5) + .setNeedCursorResult(true).setPriority(1).setRaw(true).setReplicaId(3).setReversed(true) + .setRowOffsetPerColumnFamily(5).setStartStopRowForPrefixScan(Bytes.toBytes("row_")) + .setScanMetricsEnabled(true).setSmall(true).setReadType(ReadType.STREAM) + .withStartRow(Bytes.toBytes("row_1")).withStopRow(Bytes.toBytes("row_2")) .setTimeRange(0, 13); // create a copy of existing scan object diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java index 6c48fd26fb1e..6a46d8622a78 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestScannerCallable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import static org.mockito.Mockito.when; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HRegionLocation; @@ -77,8 +76,7 @@ public void setUp() throws Exception { @Test public void testPrepareAlwaysUsesCache() throws Exception { - when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)) - .thenReturn(regionLocations); + when(connection.locateRegion(TABLE_NAME, ROW, true, true, 0)).thenReturn(regionLocations); ScannerCallable callable = new ScannerCallable(connection, TABLE_NAME, DEFAULT_SCAN, null, rpcFactory, 0); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java index 3dc86de2f354..be35b3cdc6ab 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSimpleRequestController.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,26 +48,25 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestSimpleRequestController { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSimpleRequestController.class); - private static final TableName DUMMY_TABLE - = TableName.valueOf("DUMMY_TABLE"); + private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); private static final byte[] DUMMY_BYTES_1 = Bytes.toBytes("DUMMY_BYTES_1"); private static final byte[] DUMMY_BYTES_2 = Bytes.toBytes("DUMMY_BYTES_2"); private static final byte[] DUMMY_BYTES_3 = Bytes.toBytes("DUMMY_BYTES_3"); private static final ServerName SN = ServerName.valueOf("s1,1,1"); private static final ServerName SN2 = ServerName.valueOf("s2,2,2"); - private static final HRegionInfo HRI1 - = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1); - private static final HRegionInfo HRI2 - = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2); - private static final HRegionInfo HRI3 - = new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3); + private static final HRegionInfo HRI1 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_1, DUMMY_BYTES_2, false, 1); + private static final HRegionInfo HRI2 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_2, HConstants.EMPTY_END_ROW, false, 2); + private static final HRegionInfo HRI3 = + new HRegionInfo(DUMMY_TABLE, DUMMY_BYTES_3, HConstants.EMPTY_END_ROW, false, 3); private static final HRegionLocation LOC1 = new HRegionLocation(HRI1, SN); private static final HRegionLocation LOC2 = new HRegionLocation(HRI2, SN); private static final HRegionLocation LOC3 = new HRegionLocation(HRI3, SN2); @@ -125,11 +124,9 @@ public void testTaskCheckerHost() throws IOException { final Map taskCounterPerServer = new HashMap<>(); final Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker countChecker = - new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + new SimpleRequestController.TaskCountChecker(maxTotalConcurrentTasks, + maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, tasksInProgress, + taskCounterPerServer, taskCounterPerRegion); final long maxHeapSizePerRequest = 2 * 1024 * 1024; // unlimiited SimpleRequestController.RequestHeapSizeChecker sizeChecker = @@ -166,8 +163,8 @@ public void testTaskCheckerHost() throws IOException { @Test public void testRequestHeapSizeChecker() throws IOException { final long maxHeapSizePerRequest = 2 * 1024 * 1024; - SimpleRequestController.RequestHeapSizeChecker checker - = new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + SimpleRequestController.RequestHeapSizeChecker checker = + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); // inner state is unchanged. 
for (int i = 0; i != 10; ++i) { @@ -208,10 +205,10 @@ public void testRequestHeapSizeChecker() throws IOException { @Test public void testRequestRowsChecker() throws IOException { final long maxRowCount = 100; - SimpleRequestController.RequestRowsChecker checker - = new SimpleRequestController.RequestRowsChecker(maxRowCount); + SimpleRequestController.RequestRowsChecker checker = + new SimpleRequestController.RequestRowsChecker(maxRowCount); - final long heapSizeOfRow = 100; //unused + final long heapSizeOfRow = 100; // unused // inner state is unchanged. for (int i = 0; i != 10; ++i) { ReturnCode code = checker.canTakeOperation(LOC1, heapSizeOfRow); @@ -253,8 +250,8 @@ public void testRequestRowsChecker() throws IOException { @Test public void testSubmittedSizeChecker() { final long maxHeapSizeSubmit = 2 * 1024 * 1024; - SimpleRequestController.SubmittedSizeChecker checker - = new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); + SimpleRequestController.SubmittedSizeChecker checker = + new SimpleRequestController.SubmittedSizeChecker(maxHeapSizeSubmit); for (int i = 0; i != 10; ++i) { ReturnCode include = checker.canTakeOperation(LOC1, 100000); @@ -290,10 +287,8 @@ public void testTaskCountChecker() throws InterruptedIOException { Map taskCounterPerServer = new HashMap<>(); Map taskCounterPerRegion = new HashMap<>(); SimpleRequestController.TaskCountChecker checker = new SimpleRequestController.TaskCountChecker( - maxTotalConcurrentTasks, - maxConcurrentTasksPerServer, - maxConcurrentTasksPerRegion, - tasksInProgress, taskCounterPerServer, taskCounterPerRegion); + maxTotalConcurrentTasks, maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, + tasksInProgress, taskCounterPerServer, taskCounterPerRegion); // inner state is unchanged. for (int i = 0; i != 10; ++i) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index ec2d29d1fa4c..114905fb594f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ /** * Test snapshot logic from the client */ -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestSnapshotFromAdmin { @ClassRule @@ -69,7 +69,7 @@ public class TestSnapshotFromAdmin { public void testBackoffLogic() throws Exception { final int pauseTime = 100; final int maxWaitTime = - HConstants.RETRY_BACKOFF[HConstants.RETRY_BACKOFF.length - 1] * pauseTime; + HConstants.RETRY_BACKOFF[HConstants.RETRY_BACKOFF.length - 1] * pauseTime; final int numRetries = HConstants.RETRY_BACKOFF.length; // calculate the wait time, if we just do straight backoff (ignoring the expected time from // master) @@ -80,11 +80,11 @@ public void testBackoffLogic() throws Exception { // the correct wait time, capping at the maxTime/tries + fudge room final long time = pauseTime * 3L + ((maxWaitTime / numRetries) * 3) + 300L; assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time " - + "- further testing won't prove anything.", time < ignoreExpectedTime); + + "- further testing won't prove anything.", + time < ignoreExpectedTime); // setup the mocks - ConnectionImplementation mockConnection = Mockito - .mock(ConnectionImplementation.class); + ConnectionImplementation mockConnection = Mockito.mock(ConnectionImplementation.class); Configuration conf = HBaseConfiguration.create(); // setup the conf to match the expected properties conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, numRetries); @@ -97,26 +97,22 @@ public void testBackoffLogic() throws Exception { // we need a real retrying caller RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf); RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class); - Mockito.when(controllerFactory.newController()).thenReturn( - Mockito.mock(HBaseRpcController.class)); + Mockito.when(controllerFactory.newController()) + .thenReturn(Mockito.mock(HBaseRpcController.class)); Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory); // set the max wait time for the snapshot to complete - SnapshotResponse response = SnapshotResponse.newBuilder() - .setExpectedTimeout(maxWaitTime) - .build(); - Mockito - .when( - mockMaster.snapshot((RpcController) Mockito.any(), - Mockito.any())).thenReturn(response); + SnapshotResponse response = + SnapshotResponse.newBuilder().setExpectedTimeout(maxWaitTime).build(); + Mockito.when(mockMaster.snapshot((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(response); // setup the response IsSnapshotDoneResponse.Builder builder = IsSnapshotDoneResponse.newBuilder(); builder.setDone(false); // first five times, we return false, last we get success - Mockito.when( - mockMaster.isSnapshotDone((RpcController) Mockito.any(), - Mockito.any())).thenReturn(builder.build(), builder.build(), - builder.build(), builder.build(), builder.build(), builder.setDone(true).build()); + Mockito.when(mockMaster.isSnapshotDone((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(builder.build(), builder.build(), builder.build(), builder.build(), + builder.build(), builder.setDone(true).build()); // setup the admin and run the test Admin admin = new HBaseAdmin(mockConnection); @@ -138,15 +134,14 @@ public void testBackoffLogic() throws Exception { */ @Test public void testValidateSnapshotName() throws Exception { - 
ConnectionImplementation mockConnection = Mockito - .mock(ConnectionImplementation.class); + ConnectionImplementation mockConnection = Mockito.mock(ConnectionImplementation.class); Configuration conf = HBaseConfiguration.create(); Mockito.when(mockConnection.getConfiguration()).thenReturn(conf); // we need a real retrying caller RpcRetryingCallerFactory callerFactory = new RpcRetryingCallerFactory(conf); RpcControllerFactory controllerFactory = Mockito.mock(RpcControllerFactory.class); - Mockito.when(controllerFactory.newController()).thenReturn( - Mockito.mock(HBaseRpcController.class)); + Mockito.when(controllerFactory.newController()) + .thenReturn(Mockito.mock(HBaseRpcController.class)); Mockito.when(mockConnection.getRpcRetryingCallerFactory()).thenReturn(callerFactory); Mockito.when(mockConnection.getRpcControllerFactory()).thenReturn(controllerFactory); Admin admin = new HBaseAdmin(mockConnection); @@ -166,20 +161,17 @@ public void testValidateSnapshotName() throws Exception { MasterKeepAliveConnection master = Mockito.mock(MasterKeepAliveConnection.class); Mockito.when(mockConnection.getMaster()).thenReturn(master); SnapshotResponse response = SnapshotResponse.newBuilder().setExpectedTimeout(0).build(); - Mockito.when( - master.snapshot((RpcController) Mockito.any(), Mockito.any())) + Mockito.when(master.snapshot((RpcController) Mockito.any(), Mockito.any())) .thenReturn(response); IsSnapshotDoneResponse doneResponse = IsSnapshotDoneResponse.newBuilder().setDone(true).build(); - Mockito.when( - master.isSnapshotDone((RpcController) Mockito.any(), - Mockito.any())).thenReturn(doneResponse); + Mockito.when(master.isSnapshotDone((RpcController) Mockito.any(), Mockito.any())) + .thenReturn(doneResponse); - // make sure that we can use valid names + // make sure that we can use valid names admin.snapshot(new SnapshotDescription("snapshot", TableName.valueOf(name.getMethodName()))); } - private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) - throws IOException { + private void failSnapshotStart(Admin admin, SnapshotDescription snapshot) throws IOException { try { admin.snapshot(snapshot); fail("Snapshot should not have succeed with name:" + snapshot.getName()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index 658ad0641a55..d778caca28dc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ /** * Test setting values in the descriptor. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestTableDescriptorBuilder { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -56,28 +56,21 @@ public class TestTableDescriptorBuilder { @Rule public TestName name = new TestName(); - @Test (expected=IOException.class) + @Test(expected = IOException.class) public void testAddCoprocessorTwice() throws IOException { String cpName = "a.b.c.d"; - TableDescriptor htd - = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setCoprocessor(cpName) - .setCoprocessor(cpName) - .build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) + .setCoprocessor(cpName).setCoprocessor(cpName).build(); } @Test public void testPb() throws DeserializationException, IOException { final int v = 123; - TableDescriptor htd - = TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME) - .setMaxFileSize(v) - .setDurability(Durability.ASYNC_WAL) - .setReadOnly(true) - .setRegionReplication(2) - .build(); - - byte [] bytes = TableDescriptorBuilder.toByteArray(htd); + TableDescriptor htd = + TableDescriptorBuilder.newBuilder(TableName.META_TABLE_NAME).setMaxFileSize(v) + .setDurability(Durability.ASYNC_WAL).setReadOnly(true).setRegionReplication(2).build(); + + byte[] bytes = TableDescriptorBuilder.toByteArray(htd); TableDescriptor deserializedHtd = TableDescriptorBuilder.parseFrom(bytes); assertEquals(htd, deserializedHtd); assertEquals(v, deserializedHtd.getMaxFileSize()); @@ -88,74 +81,73 @@ public void testPb() throws DeserializationException, IOException { /** * Test cps in the table description. - * * @throws Exception if setting a coprocessor fails */ @Test public void testGetSetRemoveCP() throws Exception { // simple CP String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - TableDescriptor desc - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setCoprocessor(className) // add and check that it is present + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setCoprocessor(className) // add and + // check that + // it is + // present .build(); assertTrue(desc.hasCoprocessor(className)); - desc = TableDescriptorBuilder.newBuilder(desc) - .removeCoprocessor(className) // remove it and check that it is gone + desc = TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className) // remove it and + // check that it is + // gone .build(); assertFalse(desc.hasCoprocessor(className)); } /** * Test cps in the table description. - * * @throws Exception if setting a coprocessor fails */ @Test public void testSetListRemoveCP() throws Exception { - TableDescriptor desc - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); // Check that any coprocessor is present. assertTrue(desc.getCoprocessorDescriptors().isEmpty()); // simple CP String className1 = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; String className2 = "org.apache.hadoop.hbase.coprocessor.SampleRegionWALObserver"; - desc = TableDescriptorBuilder.newBuilder(desc) - .setCoprocessor(className1) // Add the 1 coprocessor and check if present. - .build(); + desc = TableDescriptorBuilder.newBuilder(desc).setCoprocessor(className1) // Add the 1 + // coprocessor and + // check if present. 
+ .build(); assertTrue(desc.getCoprocessorDescriptors().size() == 1); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className1))); + .anyMatch(name -> name.equals(className1))); desc = TableDescriptorBuilder.newBuilder(desc) - // Add the 2nd coprocessor and check if present. - // remove it and check that it is gone - .setCoprocessor(className2) - .build(); + // Add the 2nd coprocessor and check if present. + // remove it and check that it is gone + .setCoprocessor(className2).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 2); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className2))); + .anyMatch(name -> name.equals(className2))); desc = TableDescriptorBuilder.newBuilder(desc) - // Remove one and check - .removeCoprocessor(className1) - .build(); + // Remove one and check + .removeCoprocessor(className1).build(); assertTrue(desc.getCoprocessorDescriptors().size() == 1); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className1))); + .anyMatch(name -> name.equals(className1))); assertTrue(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className2))); + .anyMatch(name -> name.equals(className2))); desc = TableDescriptorBuilder.newBuilder(desc) - // Remove the last and check - .removeCoprocessor(className2) - .build(); + // Remove the last and check + .removeCoprocessor(className2).build(); assertTrue(desc.getCoprocessorDescriptors().isEmpty()); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className1))); + .anyMatch(name -> name.equals(className1))); assertFalse(desc.getCoprocessorDescriptors().stream().map(CoprocessorDescriptor::getClassName) - .anyMatch(name -> name.equals(className2))); + .anyMatch(name -> name.equals(className2))); } /** @@ -164,9 +156,8 @@ public void testSetListRemoveCP() throws Exception { @Test public void testRemoveNonExistingCoprocessor() throws Exception { String className = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertFalse(desc.hasCoprocessor(className)); desc = TableDescriptorBuilder.newBuilder(desc).removeCoprocessor(className).build(); assertFalse(desc.hasCoprocessor(className)); @@ -179,24 +170,20 @@ public void testRemoveNonExistingCoprocessor() throws Exception { public void testRemoveString() { byte[] key = Bytes.toBytes("Some"); byte[] value = Bytes.toBytes("value"); - TableDescriptor desc - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setValue(key, value) - .build(); + TableDescriptor desc = TableDescriptorBuilder + .newBuilder(TableName.valueOf(name.getMethodName())).setValue(key, value).build(); assertTrue(Bytes.equals(value, desc.getValue(key))); - desc = TableDescriptorBuilder.newBuilder(desc) - .removeValue(key) - .build(); + desc = TableDescriptorBuilder.newBuilder(desc).removeValue(key).build(); assertTrue(desc.getValue(key) == null); } String[] legalTableNames = { "foo", "with-dash_under.dot", "_under_start_ok", - "with-dash.with_underscore", "02-01-2012.my_table_01-02", 
"xyz._mytable_", "9_9_0.table_02", - "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", - "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02"}; + "with-dash.with_underscore", "02-01-2012.my_table_01-02", "xyz._mytable_", "9_9_0.table_02", + "dot1.dot2.table", "new.-mytable", "with-dash.with.dot", "legal..t2", "legal..legal.t2", + "trailingdots..", "trailing.dots...", "ns:mytable", "ns:_mytable_", "ns:my_table_01-02" }; String[] illegalTableNames = { ".dot_start_illegal", "-dash_start_illegal", "spaces not ok", - "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", - "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2"}; + "-dash-.start_illegal", "new.table with space", "01 .table", "ns:-illegaldash", + "new:.illegaldot", "new:illegalcolon1:", "new:illegalcolon1:2" }; @Test public void testLegalTableNames() { @@ -221,8 +208,8 @@ public void testIllegalTableNames() { public void testLegalTableNamesRegex() { for (String tn : legalTableNames) { TableName tName = TableName.valueOf(tn); - assertTrue("Testing: '" + tn + "'", Pattern.matches(TableName.VALID_USER_TABLE_REGEX, - tName.getNameAsString())); + assertTrue("Testing: '" + tn + "'", + Pattern.matches(TableName.VALID_USER_TABLE_REGEX, tName.getNameAsString())); } } @@ -234,24 +221,23 @@ public void testIllegalTableNamesRegex() { } } - /** + /** * Test default value handling for maxFileSize */ @Test public void testGetMaxFileSize() { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertEquals(-1, desc.getMaxFileSize()); - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setMaxFileSize(1111L).build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setMaxFileSize(1111L).build(); assertEquals(1111L, desc.getMaxFileSize()); } @Test public void testSetMaxFileSize() throws HBaseException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); String maxFileSize = "1073741824"; builder.setMaxFileSize(maxFileSize); @@ -280,19 +266,18 @@ public void testSetMaxFileSize() throws HBaseException { */ @Test public void testGetMemStoreFlushSize() { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())).build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).build(); assertEquals(-1, desc.getMemStoreFlushSize()); - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setMemStoreFlushSize(1111L).build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setMemStoreFlushSize(1111L).build(); assertEquals(1111L, desc.getMemStoreFlushSize()); } @Test public void testSetMemStoreFlushSize() throws HBaseException { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); String memstoreFlushSize = "1073741824"; builder.setMemStoreFlushSize(memstoreFlushSize); @@ -325,50 +310,36 @@ public void testClassMethodsAreBuilderStyle() { public void testModifyFamily() { byte[] 
familyName = Bytes.toBytes("cf"); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(1000) - .setDFSReplication((short) 3) - .build(); - TableDescriptor htd - = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd) - .build(); + .setBlocksize(1000).setDFSReplication((short) 3).build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(hcd).build(); assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); assertEquals(3, htd.getColumnFamily(familyName).getDFSReplication()); - hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(2000) - .setDFSReplication((short) 1) - .build(); - htd = TableDescriptorBuilder.newBuilder(htd) - .modifyColumnFamily(hcd) - .build(); + hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(2000) + .setDFSReplication((short) 1).build(); + htd = TableDescriptorBuilder.newBuilder(htd).modifyColumnFamily(hcd).build(); assertEquals(2000, htd.getColumnFamily(familyName).getBlocksize()); assertEquals(1, htd.getColumnFamily(familyName).getDFSReplication()); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testModifyInexistentFamily() { byte[] familyName = Bytes.toBytes("cf"); HColumnDescriptor hcd = new HColumnDescriptor(familyName); - TableDescriptor htd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .modifyColumnFamily(hcd) - .build(); + TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .modifyColumnFamily(hcd).build(); } - @Test(expected=IllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testAddDuplicateFamilies() { byte[] familyName = Bytes.toBytes("cf"); - ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(1000) - .build(); + ColumnFamilyDescriptor hcd = + ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(1000).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd) - .build(); + .setColumnFamily(hcd).build(); assertEquals(1000, htd.getColumnFamily(familyName).getBlocksize()); - hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(2000) - .build(); + hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(2000).build(); // add duplicate column TableDescriptorBuilder.newBuilder(htd).setColumnFamily(hcd).build(); } @@ -376,45 +347,37 @@ public void testAddDuplicateFamilies() { @Test public void testPriority() { TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setPriority(42) - .build(); + .setPriority(42).build(); assertEquals(42, htd.getPriority()); } @Test public void testStringCustomizedValues() throws HBaseException { byte[] familyName = Bytes.toBytes("cf"); - ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(familyName) - .setBlocksize(131072) - .build(); + ColumnFamilyDescriptor hcd = + ColumnFamilyDescriptorBuilder.newBuilder(familyName).setBlocksize(131072).build(); TableDescriptor htd = TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(hcd) - .setDurability(Durability.ASYNC_WAL) - .build(); + .setColumnFamily(hcd).setDurability(Durability.ASYNC_WAL).build(); assertEquals( - 
"'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " - + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL'}}, " + + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); - htd = TableDescriptorBuilder.newBuilder(htd) - .setMaxFileSize("10737942528") - .setMemStoreFlushSize("256MB") - .build(); + htd = TableDescriptorBuilder.newBuilder(htd).setMaxFileSize("10737942528") + .setMemStoreFlushSize("256MB").build(); assertEquals( - "'testStringCustomizedValues', " + - "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " - + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " - + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " - + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", + "'testStringCustomizedValues', " + "{TABLE_ATTRIBUTES => {DURABILITY => 'ASYNC_WAL', " + + "MAX_FILESIZE => '10737942528 B (10GB 512KB)', " + + "MEMSTORE_FLUSHSIZE => '268435456 B (256MB)'}}, " + + "{NAME => 'cf', BLOCKSIZE => '131072 B (128KB)'}", htd.toStringCustomizedValues()); } @Test public void testSetEmptyValue() { TableDescriptorBuilder builder = - TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())); String testValue = "TestValue"; // test setValue builder.setValue(testValue, "2"); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java index 44d199764c5b..532d4a805db2 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.TableDescriptorUtils.TableDescriptorDelta; @@ -43,32 +41,25 @@ public void testDelta() { ColumnFamilyDescriptor cf2 = ColumnFamilyDescriptorBuilder.of("cf2"); ColumnFamilyDescriptor cf3 = ColumnFamilyDescriptorBuilder.of("cf3"); ColumnFamilyDescriptor cf4 = ColumnFamilyDescriptorBuilder.of("cf4"); - TableDescriptor td = TableDescriptorBuilder - .newBuilder(TableName.valueOf("test")) - .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)) - .build(); + TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("test")) + .setColumnFamilies(Arrays.asList(cf1, cf2, cf3, cf4)).build(); TableDescriptorDelta selfCompare = TableDescriptorUtils.computeDelta(td, td); assertEquals(0, selfCompare.getColumnsAdded().size()); assertEquals(0, selfCompare.getColumnsDeleted().size()); assertEquals(0, selfCompare.getColumnsModified().size()); - ColumnFamilyDescriptor modCf2 = ColumnFamilyDescriptorBuilder - .newBuilder(cf2).setMaxVersions(5).build(); - ColumnFamilyDescriptor modCf3 = ColumnFamilyDescriptorBuilder - .newBuilder(cf3).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf2 = + ColumnFamilyDescriptorBuilder.newBuilder(cf2).setMaxVersions(5).build(); + ColumnFamilyDescriptor modCf3 = + ColumnFamilyDescriptorBuilder.newBuilder(cf3).setMaxVersions(5).build(); ColumnFamilyDescriptor cf5 = ColumnFamilyDescriptorBuilder.of("cf5"); ColumnFamilyDescriptor cf6 = ColumnFamilyDescriptorBuilder.of("cf6"); ColumnFamilyDescriptor cf7 = ColumnFamilyDescriptorBuilder.of("cf7"); - TableDescriptor newTd = TableDescriptorBuilder - .newBuilder(td) - .removeColumnFamily(Bytes.toBytes("cf1")) - .modifyColumnFamily(modCf2) - .modifyColumnFamily(modCf3) - .setColumnFamily(cf5) - .setColumnFamily(cf6) - .setColumnFamily(cf7) - .build(); + TableDescriptor newTd = + TableDescriptorBuilder.newBuilder(td).removeColumnFamily(Bytes.toBytes("cf1")) + .modifyColumnFamily(modCf2).modifyColumnFamily(modCf3).setColumnFamily(cf5) + .setColumnFamily(cf6).setColumnFamily(cf7).build(); TableDescriptorDelta delta = TableDescriptorUtils.computeDelta(td, newTd); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java index 882381bb067c..6412b97c4ace 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableRpcPriority.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; + import java.io.IOException; import java.util.Arrays; import java.util.Optional; @@ -62,20 +63,22 @@ import org.mockito.ArgumentMatcher; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; + import org.apache.hbase.thirdparty.com.google.protobuf.ServiceException; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** - * Test that correct rpc priority is sent to server from blocking Table calls. Currently - * only implements checks for scans, but more could be added here. + * Test that correct rpc priority is sent to server from blocking Table calls. Currently only + * implements checks for scans, but more could be added here. */ @Category({ ClientTests.class, MediumTests.class }) public class TestTableRpcPriority { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableRpcPriority.class); + HBaseClassTestRule.forClass(TestTableRpcPriority.class); @Rule public TestName name = new TestName(); @@ -91,23 +94,23 @@ public void setUp() throws IOException, ServiceException { ExecutorService executorService = Executors.newCachedThreadPool(); conn = new ConnectionImplementation(conf, executorService, - UserProvider.instantiate(conf).getCurrent(), new DoNothingConnectionRegistry(conf)) { + UserProvider.instantiate(conf).getCurrent(), new DoNothingConnectionRegistry(conf)) { @Override public ClientProtos.ClientService.BlockingInterface getClient(ServerName serverName) - throws IOException { + throws IOException { return stub; } @Override public RegionLocations relocateRegion(final TableName tableName, final byte[] row, - int replicaId) throws IOException { + int replicaId) throws IOException { return locateRegion(tableName, row, true, false, replicaId); } @Override public RegionLocations locateRegion(TableName tableName, byte[] row, boolean useCache, - boolean retry, int replicaId) throws IOException { + boolean retry, int replicaId) throws IOException { RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); ServerName serverName = ServerName.valueOf("rs", 16010, 12345); HRegionLocation loc = new HRegionLocation(info, serverName); @@ -123,8 +126,8 @@ public void testScan() throws Exception { } /** - * This test verifies that our closeScanner request honors the original - * priority of the scan if it's greater than our expected HIGH_QOS for close calls. + * This test verifies that our closeScanner request honors the original priority of the scan if + * it's greater than our expected HIGH_QOS for close calls. */ @Test public void testScanSuperHighPriority() throws Exception { @@ -163,19 +166,19 @@ private void testForTable(TableName tableName, Optional priority) throw // just verify that the calls happened. 
verification of priority occurred in the mocking // open, next, then several renew lease verify(stub, atLeast(3)).scan(any(), any(ClientProtos.ScanRequest.class)); - verify(stub, times(1)).scan( - assertControllerArgs(Math.max(priority.orElse(0), HIGH_QOS)), assertScannerCloseRequest()); + verify(stub, times(1)).scan(assertControllerArgs(Math.max(priority.orElse(0), HIGH_QOS)), + assertScannerCloseRequest()); } private void mockScan(int scanPriority) throws ServiceException { int scannerId = 1; doAnswer(new Answer() { - @Override public ClientProtos.ScanResponse answer(InvocationOnMock invocation) - throws Throwable { + @Override + public ClientProtos.ScanResponse answer(InvocationOnMock invocation) throws Throwable { throw new IllegalArgumentException( - "Call not covered by explicit mock for arguments controller=" - + invocation.getArgument(0) + ", request=" + invocation.getArgument(1)); + "Call not covered by explicit mock for arguments controller=" + + invocation.getArgument(0) + ", request=" + invocation.getArgument(1)); } }).when(stub).scan(any(), any()); @@ -183,8 +186,7 @@ private void mockScan(int scanPriority) throws ServiceException { doAnswer(new Answer() { @Override - public ClientProtos.ScanResponse answer(InvocationOnMock invocation) - throws Throwable { + public ClientProtos.ScanResponse answer(InvocationOnMock invocation) throws Throwable { ClientProtos.ScanRequest req = invocation.getArgument(1); assertFalse("close scanner should not come in with scan priority " + scanPriority, req.hasCloseScanner() && req.getCloseScanner()); @@ -197,19 +199,18 @@ public ClientProtos.ScanResponse answer(InvocationOnMock invocation) } Cell cell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setType(Cell.Type.Put) - .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())).setFamily(Bytes.toBytes("cf")) - .setQualifier(Bytes.toBytes("cq")).setValue(Bytes.toBytes("v")).build(); + .setRow(Bytes.toBytes(scanNextCalled.incrementAndGet())).setFamily(Bytes.toBytes("cf")) + .setQualifier(Bytes.toBytes("cq")).setValue(Bytes.toBytes("v")).build(); Result result = Result.create(Arrays.asList(cell)); return builder.setTtl(800).setMoreResultsInRegion(true).setMoreResults(true) - .addResults(ProtobufUtil.toResult(result)).build(); + .addResults(ProtobufUtil.toResult(result)).build(); } }).when(stub).scan(assertControllerArgs(scanPriority), any()); doAnswer(new Answer() { @Override - public ClientProtos.ScanResponse answer(InvocationOnMock invocation) - throws Throwable { + public ClientProtos.ScanResponse answer(InvocationOnMock invocation) throws Throwable { ClientProtos.ScanRequest req = invocation.getArgument(1); assertTrue("close request should have scannerId", req.hasScannerId()); assertEquals("close request's scannerId should match", scannerId, req.getScannerId()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java index 2a10d3b9e8c6..96ae822f21e8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTracingBase.java @@ -23,6 +23,7 @@ import static org.hamcrest.Matchers.hasItem; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; import io.opentelemetry.sdk.testing.junit4.OpenTelemetryRule; @@ -50,8 +51,8 @@ public class TestTracingBase { 
private static final Logger LOG = LoggerFactory.getLogger(TestTracingBase.class); protected static final ServerName MASTER_HOST = ServerName.valueOf("localhost", 16010, 12345); - protected static final RegionLocations META_REGION_LOCATION = - new RegionLocations(new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, MASTER_HOST)); + protected static final RegionLocations META_REGION_LOCATION = new RegionLocations( + new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, MASTER_HOST)); protected Configuration conf; @@ -67,24 +68,21 @@ public void setUp() throws Exception { } protected void assertTrace(String className, String methodName, ServerName serverName, - TableName tableName) { + TableName tableName) { String expectedSpanName = String.format("%s.%s", className, methodName); Waiter.waitFor(conf, 1000, - () -> TRACE_RULE.getSpans().stream() - .anyMatch(span -> span.getName().equals(expectedSpanName) && - span.getKind() == SpanKind.INTERNAL && span.hasEnded())); - SpanData data = TRACE_RULE.getSpans().stream() - .filter(s -> s.getName().equals(expectedSpanName)).findFirst().get(); + () -> TRACE_RULE.getSpans().stream().anyMatch(span -> span.getName().equals(expectedSpanName) + && span.getKind() == SpanKind.INTERNAL && span.hasEnded())); + SpanData data = TRACE_RULE.getSpans().stream().filter(s -> s.getName().equals(expectedSpanName)) + .findFirst().get(); assertEquals(StatusCode.OK, data.getStatus().getStatusCode()); if (serverName != null) { Optional foundServerName = - TRACE_RULE.getSpans().stream() - .filter(s -> s.getName().equals(expectedSpanName)) - .filter(s -> Objects.equals( - serverName.getServerName(), - s.getAttributes().get(HBaseSemanticAttributes.SERVER_NAME_KEY))) - .findAny(); + TRACE_RULE.getSpans().stream().filter(s -> s.getName().equals(expectedSpanName)) + .filter(s -> Objects.equals(serverName.getServerName(), + s.getAttributes().get(HBaseSemanticAttributes.SERVER_NAME_KEY))) + .findAny(); assertTrue(foundServerName.isPresent()); } @@ -103,19 +101,15 @@ protected SpanData waitSpan(String name) { protected SpanData waitSpan(Matcher matcher) { Matcher spanLocator = allOf(matcher, hasEnded()); try { - Waiter.waitFor(conf, 1000, new MatcherPredicate<>( - "waiting for span", - () -> TRACE_RULE.getSpans(), hasItem(spanLocator))); + Waiter.waitFor(conf, 1000, new MatcherPredicate<>("waiting for span", + () -> TRACE_RULE.getSpans(), hasItem(spanLocator))); } catch (AssertionError e) { LOG.error("AssertionError while waiting for matching span. 
Span reservoir contains: {}", TRACE_RULE.getSpans()); throw e; } - return TRACE_RULE.getSpans() - .stream() - .filter(spanLocator::matches) - .findFirst() - .orElseThrow(AssertionError::new); + return TRACE_RULE.getSpans().stream().filter(spanLocator::matches).findFirst() + .orElseThrow(AssertionError::new); } static class RegistryForTracingTest implements ConnectionRegistry { @@ -138,11 +132,13 @@ public CompletableFuture getActiveMaster() { return CompletableFuture.completedFuture(MASTER_HOST); } - @Override public String getConnectionString() { + @Override + public String getConnectionString() { return "nothing"; } - @Override public void close() { + @Override + public void close() { } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java index 2c7061259f90..df6ffbbb2b18 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/StringTraceRenderer.java @@ -75,56 +75,43 @@ private static void populateChildren(final Map spansById) { } private static List findRoots(final Map spansById) { - return spansById.values() - .stream() - .filter(node -> Objects.equals(node.spanData.getParentSpanId(), SpanId.getInvalid())) - .collect(Collectors.toList()); + return spansById.values().stream() + .filter(node -> Objects.equals(node.spanData.getParentSpanId(), SpanId.getInvalid())) + .collect(Collectors.toList()); } public void render(final Consumer writer) { - for (ListIterator iter = graphs.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = graphs.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node node = iter.next(); render(writer, node, 0, idx == 0); } } - private static void render( - final Consumer writer, - final Node node, - final int indent, - final boolean isFirst - ) { + private static void render(final Consumer writer, final Node node, final int indent, + final boolean isFirst) { writer.accept(render(node.spanData, indent, isFirst)); final List children = new ArrayList<>(node.children.values()); - for (ListIterator iter = children.listIterator(); iter.hasNext(); ) { + for (ListIterator iter = children.listIterator(); iter.hasNext();) { final int idx = iter.nextIndex(); final Node child = iter.next(); render(writer, child, indent + 2, idx == 0); } } - private static String render( - final SpanData spanData, - final int indent, - final boolean isFirst - ) { + private static String render(final SpanData spanData, final int indent, final boolean isFirst) { final StringBuilder sb = new StringBuilder(); for (int i = 0; i < indent; i++) { sb.append(' '); } - return sb.append(isFirst ? "└─ " : "├─ ") - .append(render(spanData)) - .toString(); + return sb.append(isFirst ? 
"└─ " : "├─ ").append(render(spanData)).toString(); } private static String render(final SpanData spanData) { return new ToStringBuilder(spanData, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("spanId", spanData.getSpanId()) - .append("name", spanData.getName()) - .append("hasEnded", spanData.hasEnded()) - .toString(); + .append("spanId", spanData.getSpanId()).append("name", spanData.getName()) + .append("hasEnded", spanData.hasEnded()).toString(); } private static class Node { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java index c7bb205076cd..1a2d6ea4ccdc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/AttributesMatchers.java @@ -20,6 +20,7 @@ import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasProperty; + import io.opentelemetry.api.common.AttributeKey; import io.opentelemetry.api.common.Attributes; import java.util.Arrays; @@ -32,12 +33,11 @@ */ public final class AttributesMatchers { - private AttributesMatchers() { } + private AttributesMatchers() { + } - public static Matcher containsEntry( - Matcher> keyMatcher, - Matcher valueMatcher - ) { + public static Matcher containsEntry(Matcher> keyMatcher, + Matcher valueMatcher) { return new IsAttributesContaining<>(keyMatcher, valueMatcher); } @@ -53,10 +53,8 @@ public static Matcher containsEntryWithStringValuesOf(String key, St return containsEntry(AttributeKey.stringArrayKey(key), Arrays.asList(values)); } - public static Matcher containsEntryWithStringValuesOf( - String key, - Matcher> matcher - ) { + public static Matcher containsEntryWithStringValuesOf(String key, + Matcher> matcher) { return new IsAttributesContaining<>(equalTo(AttributeKey.stringArrayKey(key)), matcher); } @@ -64,37 +62,28 @@ private static final class IsAttributesContaining extends TypeSafeMatcher> keyMatcher; private final Matcher valueMatcher; - private IsAttributesContaining( - final Matcher> keyMatcher, - final Matcher valueMatcher - ) { + private IsAttributesContaining(final Matcher> keyMatcher, + final Matcher valueMatcher) { this.keyMatcher = keyMatcher; this.valueMatcher = valueMatcher; } @Override protected boolean matchesSafely(Attributes item) { - return item.asMap().entrySet().stream().anyMatch(e -> allOf( - hasProperty("key", keyMatcher), - hasProperty("value", valueMatcher)) - .matches(e)); + return item.asMap().entrySet().stream().anyMatch( + e -> allOf(hasProperty("key", keyMatcher), hasProperty("value", valueMatcher)).matches(e)); } @Override public void describeMismatchSafely(Attributes item, Description mismatchDescription) { - mismatchDescription - .appendText("Attributes was ") - .appendValueList("[", ", ", "]", item.asMap().entrySet()); + mismatchDescription.appendText("Attributes was ").appendValueList("[", ", ", "]", + item.asMap().entrySet()); } @Override public void describeTo(Description description) { - description - .appendText("Attributes containing [") - .appendDescriptionOf(keyMatcher) - .appendText("->") - .appendDescriptionOf(valueMatcher) - .appendText("]"); + description.appendText("Attributes containing [").appendDescriptionOf(keyMatcher) + .appendText("->").appendDescriptionOf(valueMatcher).appendText("]"); } } } diff --git 
a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java index e24245fb4c62..106e52cc0f91 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/EventMatchers.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.hamcrest.Matchers.equalTo; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.sdk.trace.data.EventData; import org.hamcrest.FeatureMatcher; @@ -28,12 +29,14 @@ */ public final class EventMatchers { - private EventMatchers() { } + private EventMatchers() { + } public static Matcher hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "EventData having attributes that ", "attributes") { - @Override protected Attributes featureValueOf(EventData actual) { + return new FeatureMatcher(matcher, "EventData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(EventData actual) { return actual.getAttributes(); } }; @@ -45,7 +48,8 @@ public static Matcher hasName(String name) { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "EventData with a name that ", "name") { - @Override protected String featureValueOf(EventData actual) { + @Override + protected String featureValueOf(EventData actual) { return actual.getName(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java index 01aa61805a21..b51b8ead6064 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/SpanDataMatchers.java @@ -19,6 +19,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; + import io.opentelemetry.api.common.Attributes; import io.opentelemetry.api.trace.SpanKind; import io.opentelemetry.api.trace.StatusCode; @@ -36,21 +37,22 @@ */ public final class SpanDataMatchers { - private SpanDataMatchers() { } + private SpanDataMatchers() { + } public static Matcher hasAttributes(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData having attributes that ", "attributes" - ) { - @Override protected Attributes featureValueOf(SpanData item) { + return new FeatureMatcher(matcher, "SpanData having attributes that ", + "attributes") { + @Override + protected Attributes featureValueOf(SpanData item) { return item.getAttributes(); } }; } public static Matcher hasDuration(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData having duration that ", "duration") { + return new FeatureMatcher(matcher, "SpanData having duration that ", + "duration") { @Override protected Duration featureValueOf(SpanData item) { return Duration.ofNanos(item.getEndEpochNanos() - item.getStartEpochNanos()); @@ -60,28 +62,33 @@ protected Duration featureValueOf(SpanData item) { public static Matcher hasEnded() { return new TypeSafeMatcher() { - @Override protected boolean matchesSafely(SpanData item) { + @Override + protected boolean matchesSafely(SpanData item) { return item.hasEnded(); } - @Override public void describeTo(Description description) { + + @Override + public void describeTo(Description 
description) { description.appendText("SpanData that hasEnded"); } }; } public static Matcher hasEvents(Matcher> matcher) { - return new FeatureMatcher>( - matcher, "SpanData having events that", "events") { - @Override protected Iterable featureValueOf(SpanData item) { + return new FeatureMatcher>(matcher, + "SpanData having events that", "events") { + @Override + protected Iterable featureValueOf(SpanData item) { return item.getEvents(); } }; } public static Matcher hasKind(SpanKind kind) { - return new FeatureMatcher( - equalTo(kind), "SpanData with kind that", "SpanKind") { - @Override protected SpanKind featureValueOf(SpanData item) { + return new FeatureMatcher(equalTo(kind), "SpanData with kind that", + "SpanKind") { + @Override + protected SpanKind featureValueOf(SpanData item) { return item.getKind(); } }; @@ -93,7 +100,8 @@ public static Matcher hasName(String name) { public static Matcher hasName(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a name that", "name") { - @Override protected String featureValueOf(SpanData item) { + @Override + protected String featureValueOf(SpanData item) { return item.getName(); } }; @@ -109,9 +117,9 @@ public static Matcher hasParentSpanId(SpanData parent) { public static Matcher hasParentSpanId(Matcher matcher) { return new FeatureMatcher(matcher, "SpanKind with a parentSpanId that", - "parentSpanId" - ) { - @Override protected String featureValueOf(SpanData item) { + "parentSpanId") { + @Override + protected String featureValueOf(SpanData item) { return item.getParentSpanId(); } }; @@ -120,13 +128,15 @@ public static Matcher hasParentSpanId(Matcher matcher) { public static Matcher hasStatusWithCode(StatusCode statusCode) { final Matcher matcher = is(equalTo(statusCode)); return new TypeSafeMatcher() { - @Override protected boolean matchesSafely(SpanData item) { + @Override + protected boolean matchesSafely(SpanData item) { final StatusData statusData = item.getStatus(); - return statusData != null - && statusData.getStatusCode() != null - && matcher.matches(statusData.getStatusCode()); + return statusData != null && statusData.getStatusCode() != null + && matcher.matches(statusData.getStatusCode()); } - @Override public void describeTo(Description description) { + + @Override + public void describeTo(Description description) { description.appendText("SpanData with StatusCode that ").appendDescriptionOf(matcher); } }; @@ -137,9 +147,10 @@ public static Matcher hasTraceId(String traceId) { } public static Matcher hasTraceId(Matcher matcher) { - return new FeatureMatcher( - matcher, "SpanData with a traceId that ", "traceId") { - @Override protected String featureValueOf(SpanData item) { + return new FeatureMatcher(matcher, "SpanData with a traceId that ", + "traceId") { + @Override + protected String featureValueOf(SpanData item) { return item.getTraceId(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java index 71aedbde6497..d2b39bceaa61 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/trace/hamcrest/TraceTestUtil.java @@ -15,12 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.client.trace.hamcrest; import static org.apache.hadoop.hbase.client.trace.hamcrest.AttributesMatchers.containsEntry; import static org.apache.hadoop.hbase.client.trace.hamcrest.SpanDataMatchers.hasAttributes; import static org.hamcrest.Matchers.allOf; + import io.opentelemetry.api.trace.Span; import io.opentelemetry.sdk.trace.data.SpanData; import org.apache.hadoop.hbase.TableName; @@ -30,16 +30,16 @@ public final class TraceTestUtil { - private TraceTestUtil() { } + private TraceTestUtil() { + } /** * All {@link Span}s involving {@code conn} should include these attributes. */ public static Matcher buildConnectionAttributesMatcher(AsyncConnectionImpl conn) { - return hasAttributes(allOf( - containsEntry("db.system", "hbase"), - containsEntry("db.connection_string", "nothing"), - containsEntry("db.user", conn.getUser().toString()))); + return hasAttributes( + allOf(containsEntry("db.system", "hbase"), containsEntry("db.connection_string", "nothing"), + containsEntry("db.user", conn.getUser().toString()))); } /** @@ -47,18 +47,16 @@ public static Matcher buildConnectionAttributesMatcher(AsyncConnection * @see #buildConnectionAttributesMatcher(AsyncConnectionImpl) */ public static Matcher buildConnectionAttributesMatcher(ConnectionImplementation conn) { - return hasAttributes(allOf( - containsEntry("db.system", "hbase"), - containsEntry("db.connection_string", "nothing"), - containsEntry("db.user", conn.getUser().toString()))); + return hasAttributes( + allOf(containsEntry("db.system", "hbase"), containsEntry("db.connection_string", "nothing"), + containsEntry("db.user", conn.getUser().toString()))); } /** * All {@link Span}s involving {@code tableName} should include these attributes. */ public static Matcher buildTableAttributesMatcher(TableName tableName) { - return hasAttributes(allOf( - containsEntry("db.name", tableName.getNamespaceAsString()), + return hasAttributes(allOf(containsEntry("db.name", tableName.getNamespaceAsString()), containsEntry("db.hbase.table", tableName.getNameAsString()))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java index 275fb0931aec..fd2cd40421b7 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/exceptions/TestClientExceptionsUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java index 868f3b7fda43..7ad129f8e417 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestComparators.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestComparators { @ClassRule @@ -105,55 +105,55 @@ public void testCellFieldsCompare() throws Exception { assertFalse(PrivateCellUtil.qualifierStartsWith(kv, q2)); assertFalse(PrivateCellUtil.qualifierStartsWith(kv, Bytes.toBytes("longerthanthequalifier"))); - //Binary component comparisons + // Binary component comparisons byte[] val = Bytes.toBytes("abcd"); kv = new KeyValue(r0, f, q1, val); buffer = ByteBuffer.wrap(kv.getBuffer()); bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); - //equality check - //row comparison - //row is "row0"(set by variable r0) - //and we are checking for equality to 'o' at position 1 - //'r' is at position 0. + // equality check + // row comparison + // row is "row0"(set by variable r0) + // and we are checking for equality to 'o' at position 1 + // 'r' is at position 0. byte[] component = Bytes.toBytes("o"); comparable = new BinaryComponentComparator(component, 1); assertEquals(0, PrivateCellUtil.compareRow(bbCell, comparable)); assertEquals(0, PrivateCellUtil.compareRow(kv, comparable)); - //value comparison - //value is "abcd"(set by variable val). - //and we are checking for equality to 'c' at position 2. - //'a' is at position 0. + // value comparison + // value is "abcd"(set by variable val). + // and we are checking for equality to 'c' at position 2. + // 'a' is at position 0. component = Bytes.toBytes("c"); comparable = new BinaryComponentComparator(component, 2); - assertEquals(0,PrivateCellUtil.compareValue(bbCell, comparable)); - assertEquals(0,PrivateCellUtil.compareValue(kv, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(bbCell, comparable)); + assertEquals(0, PrivateCellUtil.compareValue(kv, comparable)); - //greater than + // greater than component = Bytes.toBytes("z"); - //checking for greater than at position 1. - //for both row("row0") and value("abcd") - //'z' > 'r' + // checking for greater than at position 1. 
+ // for both row("row0") and value("abcd") + // 'z' > 'r' comparable = new BinaryComponentComparator(component, 1); - //row comparison + // row comparison assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) > 0); - //value comparison - //'z' > 'a' + // value comparison + // 'z' > 'a' assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) > 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) > 0); - //less than + // less than component = Bytes.toBytes("a"); - //checking for less than at position 1 for row ("row0") + // checking for less than at position 1 for row ("row0") comparable = new BinaryComponentComparator(component, 1); - //row comparison - //'a' < 'r' + // row comparison + // 'a' < 'r' assertTrue(PrivateCellUtil.compareRow(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareRow(kv, comparable) < 0); - //value comparison - //checking for less than at position 2 for value("abcd") - //'a' < 'c' + // value comparison + // checking for less than at position 2 for value("abcd") + // 'a' < 'c' comparable = new BinaryComponentComparator(component, 2); assertTrue(PrivateCellUtil.compareValue(bbCell, comparable) < 0); assertTrue(PrivateCellUtil.compareValue(kv, comparable) < 0); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java index 3e03a0796a54..af8931299aa6 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestKeyOnlyFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -64,12 +64,10 @@ public void testKeyOnly() throws Exception { byte[] q = Bytes.toBytes("qual1"); byte[] v = Bytes.toBytes("val1"); byte[] tags = Bytes.toBytes("tag1"); - KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, - v.length, tags); + KeyValue kv = new KeyValue(r, f, q, 0, q.length, 1234L, Type.Put, v, 0, v.length, tags); ByteBuffer buffer = ByteBuffer.wrap(kv.getBuffer()); - ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, - buffer.remaining()); + ByteBufferKeyValue bbCell = new ByteBufferKeyValue(buffer, 0, buffer.remaining()); // KV format: // Rebuild as: <0:4> @@ -86,41 +84,34 @@ public void testKeyOnly() throws Exception { KeyValue KeyOnlyKeyValue = new KeyValue(newBuffer); KeyOnlyCell keyOnlyCell = new KeyOnlyCell(kv, lenAsVal); - KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = new KeyOnlyByteBufferExtendedCell( - bbCell, lenAsVal); + KeyOnlyByteBufferExtendedCell keyOnlyByteBufferedCell = + new KeyOnlyByteBufferExtendedCell(bbCell, lenAsVal); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyCell)); assertTrue(CellUtil.matchingRows(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil - .matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingFamily(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingQualifier(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyCell)); - assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell - .getValueLength()); + assertTrue(KeyOnlyKeyValue.getValueLength() == keyOnlyByteBufferedCell.getValueLength()); assertEquals(8 + keyLen + (lenAsVal ? 4 : 0), KeyOnlyKeyValue.getSerializedSize()); assertEquals(8 + keyLen + (lenAsVal ? 
4 : 0), keyOnlyCell.getSerializedSize()); if (keyOnlyByteBufferedCell.getValueLength() > 0) { - assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, - keyOnlyByteBufferedCell)); + assertTrue(CellUtil.matchingValue(KeyOnlyKeyValue, keyOnlyByteBufferedCell)); } assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyCell.getTimestamp()); - assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell - .getTimestamp()); + assertTrue(KeyOnlyKeyValue.getTimestamp() == keyOnlyByteBufferedCell.getTimestamp()); assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyCell.getTypeByte()); - assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell - .getTypeByte()); + assertTrue(KeyOnlyKeyValue.getTypeByte() == keyOnlyByteBufferedCell.getTypeByte()); assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyCell.getTagsLength()); - assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell - .getTagsLength()); + assertTrue(KeyOnlyKeyValue.getTagsLength() == keyOnlyByteBufferedCell.getTagsLength()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java index 60c8cd084997..6e1e89634a36 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/filter/TestLongComparator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,12 +33,12 @@ public class TestLongComparator { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestLongComparator.class); - private long[] values = { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, - Long.MAX_VALUE }; + private long[] values = + { Long.MIN_VALUE, -10000000000L, -1000000L, 0L, 1000000L, 10000000000L, Long.MAX_VALUE }; @Test public void testSimple() { - for (int i = 1; i < values.length ; i++) { + for (int i = 1; i < values.length; i++) { for (int j = 0; j < i; j++) { LongComparator cp = new LongComparator(values[i]); assertEquals(1, cp.compareTo(Bytes.toBytes(values[j]))); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java index 62eba1ecea5c..f0d2b55ca188 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestCellBlockBuilder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -82,8 +82,8 @@ static void doBuildCellBlockUndoCellBlock(final CellBlockBuilder builder, final CellScanner cellScanner = sized ? 
getSizedCellScanner(cells) : CellUtil.createCellScanner(Arrays.asList(cells).iterator()); ByteBuffer bb = builder.buildCellBlock(codec, compressor, cellScanner); - cellScanner = builder.createCellScannerReusingBuffers(codec, compressor, - new SingleByteBuff(bb)); + cellScanner = + builder.createCellScannerReusingBuffers(codec, compressor, new SingleByteBuff(bb)); int i = 0; while (cellScanner.advance()) { i++; @@ -176,7 +176,6 @@ private static void timerTest(final CellBlockBuilder builder, final StopWatch ti /** * For running a few tests of methods herein. - * * @param args the arguments to use for the timer test * @throws IOException if creating the build fails */ diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java index 48a079d3e75b..33b41ef9f969 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestConnectionId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,15 +33,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class, ClientTests.class}) +@Category({ SmallTests.class, ClientTests.class }) public class TestConnectionId { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestConnectionId.class); + HBaseClassTestRule.forClass(TestConnectionId.class); private Configuration testConfig = HBaseConfiguration.create(); - private User testUser1 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); - private User testUser2 = User.createUserForTesting(testConfig, "test", new String[]{"testgroup"}); + private User testUser1 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); + private User testUser2 = + User.createUserForTesting(testConfig, "test", new String[] { "testgroup" }); private String serviceName = "test"; private Address address = Address.fromParts("localhost", 999); private ConnectionId connectionId1 = new ConnectionId(testUser1, serviceName, address); @@ -71,9 +73,8 @@ public void testToString() { } /** - * Test if the over-ridden equals method satisfies all the properties - * (reflexive, symmetry, transitive and null) - * along with their hashcode + * Test if the over-ridden equals method satisfies all the properties (reflexive, symmetry, + * transitive and null) along with their hashcode */ @Test public void testEqualsWithHashCode() { @@ -87,8 +88,8 @@ public void testEqualsWithHashCode() { // Test the Transitive Property ConnectionId connectionId3 = new ConnectionId(testUser1, serviceName, address); - assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) && - connectionId1.equals(connectionId3)); + assertTrue(connectionId1.equals(connectionId) && connectionId.equals(connectionId3) + && connectionId1.equals(connectionId3)); assertEquals(connectionId.hashCode(), connectionId3.hashCode()); // Test For null @@ -99,8 +100,8 @@ public void testEqualsWithHashCode() { } /** - * Test the hashcode for same object and different object with both hashcode - * function and static hashcode function + * Test the hashcode for same object and different object with both hashcode function and static + * hashcode function */ @Test public void 
testHashCode() { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java index dc94e91f4fde..1a41710aaeec 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestFailedServersLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestFailedServersLog { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFailedServersLog.class); + HBaseClassTestRule.forClass(TestFailedServersLog.class); static final int TEST_PORT = 9999; @@ -58,14 +58,14 @@ public void setup() { when(mockAppender.getName()).thenReturn("mockAppender"); when(mockAppender.isStarted()).thenReturn(true); ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(FailedServers.class)).addAppender(mockAppender); + .getLogger(FailedServers.class)).addAppender(mockAppender); } @After public void teardown() { ((org.apache.logging.log4j.core.Logger) org.apache.logging.log4j.LogManager - .getLogger(FailedServers.class)).removeAppender(mockAppender); + .getLogger(FailedServers.class)).removeAppender(mockAppender); } @Test @@ -77,7 +77,7 @@ public void testAddToFailedServersLogging() { @Override public Void answer(InvocationOnMock invocation) throws Throwable { org.apache.logging.log4j.core.LogEvent logEvent = - invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); + invocation.getArgument(0, org.apache.logging.log4j.core.LogEvent.class); level.set(logEvent.getLevel()); msg.set(logEvent.getMessage().getFormattedMessage()); return null; @@ -92,7 +92,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { verify(mockAppender, times(1)).append(any(org.apache.logging.log4j.core.LogEvent.class)); assertEquals(org.apache.logging.log4j.Level.DEBUG, level.get()); - assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + - nullException.toString(), msg.get()); + assertEquals("Added failed server with address " + addr.toString() + " to list caused by " + + nullException.toString(), + msg.get()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java index d829b4bfd654..f4e53bfdb7b8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestHBaseRpcControllerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java index 45da1e8560df..bcb5e87d7b24 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestIPCUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -49,10 +49,10 @@ public class TestIPCUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestIPCUtil.class); + HBaseClassTestRule.forClass(TestIPCUtil.class); private static Throwable create(Class clazz) throws InstantiationException, - IllegalAccessException, InvocationTargetException, NoSuchMethodException { + IllegalAccessException, InvocationTargetException, NoSuchMethodException { try { Constructor c = clazz.getDeclaredConstructor(); c.setAccessible(true); @@ -79,7 +79,7 @@ private static Throwable create(Class clazz) throws Instant try { Constructor c = - clazz.getDeclaredConstructor(String.class, Throwable.class); + clazz.getDeclaredConstructor(String.class, Throwable.class); c.setAccessible(true); return c.newInstance("error", new Exception("error")); } catch (NoSuchMethodException e) { @@ -87,7 +87,7 @@ private static Throwable create(Class clazz) throws Instant } Constructor c = - clazz.getDeclaredConstructor(Throwable.class, Throwable.class); + clazz.getDeclaredConstructor(Throwable.class, Throwable.class); c.setAccessible(true); return c.newInstance(new Exception("error"), "error"); } @@ -104,15 +104,16 @@ public void testWrapConnectionException() throws Exception { Address addr = Address.fromParts("127.0.0.1", 12345); for (Throwable exception : exceptions) { if (exception instanceof TimeoutException) { - assertThat(IPCUtil.wrapException(addr, null, exception), instanceOf(TimeoutIOException.class)); + assertThat(IPCUtil.wrapException(addr, null, exception), + instanceOf(TimeoutIOException.class)); } else { - IOException ioe = IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, - exception); + IOException ioe = + IPCUtil.wrapException(addr, RegionInfoBuilder.FIRST_META_REGIONINFO, exception); // Assert that the exception contains the Region name if supplied. HBASE-25735. // Not all exceptions get the region stuffed into it. if (ioe.getMessage() != null) { - assertTrue(ioe.getMessage(). 
- contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); + assertTrue(ioe.getMessage() + .contains(RegionInfoBuilder.FIRST_META_REGIONINFO.getRegionNameAsString())); } assertThat(ioe, instanceOf(exception.getClass())); } @@ -135,8 +136,8 @@ public void run() { if (depth <= IPCUtil.MAX_DEPTH) { if (numElements <= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call run directly but stack trace decreased from " + - numStackTraceElements.intValue() + " to " + numElements)); + new AssertionError("should call run directly but stack trace decreased from " + + numStackTraceElements.intValue() + " to " + numElements)); return; } numStackTraceElements.setValue(numElements); @@ -144,9 +145,9 @@ public void run() { } else { if (numElements >= numStackTraceElements.intValue()) { future.completeExceptionally( - new AssertionError("should call eventLoop.execute to prevent stack overflow but" + - " stack trace increased from " + numStackTraceElements.intValue() + " to " + - numElements)); + new AssertionError("should call eventLoop.execute to prevent stack overflow but" + + " stack trace increased from " + numStackTraceElements.intValue() + " to " + + numElements)); } else { future.complete(null); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java index 8782fe116b07..e91b0132e787 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestNettyRpcConnection.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -47,7 +47,7 @@ public class TestNettyRpcConnection { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNettyRpcConnection.class); + HBaseClassTestRule.forClass(TestNettyRpcConnection.class); private static final Logger LOG = LoggerFactory.getLogger(TestNettyRpcConnection.class); @@ -59,7 +59,7 @@ public class TestNettyRpcConnection { public static void setUp() throws IOException { CLIENT = new NettyRpcClient(HBaseConfiguration.create()); CONN = new NettyRpcConnection(CLIENT, - new ConnectionId(User.getCurrent(), "test", Address.fromParts("localhost", 1234))); + new ConnectionId(User.getCurrent(), "test", Address.fromParts("localhost", 1234))); } @AfterClass diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java index ba1e27258d2d..3b05391b42d9 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/ipc/TestRpcClientDeprecatedNameMapping.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java index 62e204a65a2c..3e59a09fae2e 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestQuotaFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -33,16 +33,11 @@ public class TestQuotaFilter { @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * QuotaFilter qf - * = new QuotaFilter() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: QuotaFilter qf = new QuotaFilter() .setFoo(foo) + * .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" returns the + * declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(QuotaFilter.class); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java index 37a21dc2b18b..afc8e24d60f4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaGlobalsSettingsBypass.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -21,7 +22,6 @@ import static org.junit.Assert.fail; import java.io.IOException; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory.QuotaGlobalsSettingsBypass; @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestQuotaGlobalsSettingsBypass { @ClassRule @@ -142,6 +142,7 @@ void expectFailure(QuotaSettings one, QuotaSettings two) throws IOException { try { one.merge(two); fail("Expected to see an Exception merging " + two + " into " + one); - } catch (IllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java index 6b9212f6260f..d7c21c8fda35 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestQuotaSettingsFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,8 +53,7 @@ public class TestQuotaSettingsFactory { @Test public void testAllQuotasAddedToList() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1024L * 1024L * 1024L * 50L) // 50G .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) // Disable the table .build(); final long readLimit = 1000; @@ -67,8 +66,7 @@ public void testAllQuotasAddedToList() { .setWriteNum(TimedQuota.newBuilder().setSoftLimit(writeLimit) .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build()) .build(); - final Quotas quotas = Quotas.newBuilder() - .setSpace(spaceQuota) // Set the FS quotas + final Quotas quotas = Quotas.newBuilder().setSpace(spaceQuota) // Set the FS quotas .setThrottle(throttle) // Set some RPC limits .build(); final TableName tn = TableName.valueOf("my_table"); @@ -125,19 +123,15 @@ public void testAllQuotasAddedToList() { @Test(expected = IllegalArgumentException.class) public void testNeitherTableNorNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(null, null, spaceQuota); } @Test(expected = IllegalArgumentException.class) public void testBothTableAndNamespace() { - final SpaceQuota spaceQuota = SpaceQuota.newBuilder() - .setSoftLimit(1L) - .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE) - .build(); + final SpaceQuota spaceQuota = SpaceQuota.newBuilder().setSoftLimit(1L) + .setViolationPolicy(QuotaProtos.SpaceViolationPolicy.DISABLE).build(); QuotaSettingsFactory.fromSpace(TableName.valueOf("foo"), "bar", spaceQuota); } @@ -150,7 
+144,7 @@ public void testSpaceLimitSettings() { QuotaSettingsFactory.limitTableSpace(tableName, sizeLimit, violationPolicy); assertNotNull("QuotaSettings should not be null", settings); assertTrue("Should be an instance of SpaceLimitSettings", - settings instanceof SpaceLimitSettings); + settings instanceof SpaceLimitSettings); SpaceLimitSettings spaceLimitSettings = (SpaceLimitSettings) settings; SpaceLimitRequest protoRequest = spaceLimitSettings.getProto(); assertTrue("Request should have a SpaceQuota", protoRequest.hasQuota()); @@ -167,7 +161,7 @@ public void testSpaceLimitSettingsForDeletes() { QuotaSettings nsSettings = QuotaSettingsFactory.removeNamespaceSpaceLimit(ns); assertNotNull("QuotaSettings should not be null", nsSettings); assertTrue("Should be an instance of SpaceLimitSettings", - nsSettings instanceof SpaceLimitSettings); + nsSettings instanceof SpaceLimitSettings); SpaceLimitRequest nsProto = ((SpaceLimitSettings) nsSettings).getProto(); assertTrue("Request should have a SpaceQuota", nsProto.hasQuota()); assertTrue("The remove attribute should be true", nsProto.getQuota().getRemove()); @@ -175,7 +169,7 @@ public void testSpaceLimitSettingsForDeletes() { QuotaSettings tableSettings = QuotaSettingsFactory.removeTableSpaceLimit(tn); assertNotNull("QuotaSettings should not be null", tableSettings); assertTrue("Should be an instance of SpaceLimitSettings", - tableSettings instanceof SpaceLimitSettings); + tableSettings instanceof SpaceLimitSettings); SpaceLimitRequest tableProto = ((SpaceLimitSettings) tableSettings).getProto(); assertTrue("Request should have a SpaceQuota", tableProto.hasQuota()); assertTrue("The remove attribute should be true", tableProto.getQuota().getRemove()); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java index 2406d10ed0a0..d14c4f539af1 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestSpaceLimitSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ /** * Test class for {@link SpaceLimitSettings}. 
*/ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestSpaceLimitSettings { @ClassRule @@ -130,14 +130,14 @@ public void testNamespaceQuota() { @Test public void testQuotaMerging() throws IOException { TableName tn = TableName.valueOf("foo"); - QuotaSettings originalSettings = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings largerSizeLimit = QuotaSettingsFactory.limitTableSpace( - tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); - QuotaSettings differentPolicy = QuotaSettingsFactory.limitTableSpace( - tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); - QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace( - "ns1", 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings originalSettings = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings largerSizeLimit = + QuotaSettingsFactory.limitTableSpace(tn, 5L * 1024L * 1024L, SpaceViolationPolicy.DISABLE); + QuotaSettings differentPolicy = + QuotaSettingsFactory.limitTableSpace(tn, 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); + QuotaSettings incompatibleSettings = QuotaSettingsFactory.limitNamespaceSpace("ns1", + 5L * 1024L * 1024L, SpaceViolationPolicy.NO_WRITES); assertEquals(originalSettings.merge(largerSizeLimit), largerSizeLimit); assertEquals(originalSettings.merge(differentPolicy), differentPolicy); @@ -145,7 +145,7 @@ public void testQuotaMerging() throws IOException { originalSettings.merge(incompatibleSettings); fail("Should not be able to merge a Table space quota with a namespace space quota."); } catch (IllegalArgumentException e) { - //pass + // pass } } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java index 53fb9bd3e927..0d75ea261e75 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/quotas/TestThrottleSettings.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.ThrottleRequest; import org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.TimedQuota; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestThrottleSettings { @ClassRule @@ -44,15 +44,13 @@ public class TestThrottleSettings { @Test public void testMerge() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1); TimedQuota tq2 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest tr2 = ThrottleRequest.newBuilder().setTimedQuota(tq2) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); @@ -66,15 +64,13 @@ public void testMerge() throws IOException { @Test public void testIncompatibleThrottleTypes() throws IOException { TimedQuota requestsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest requestsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(requestsQuota) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, requestsQuotaReq); TimedQuota readsQuota = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.SECONDS).build(); ThrottleRequest readsQuotaReq = ThrottleRequest.newBuilder().setTimedQuota(readsQuota) .setType(QuotaProtos.ThrottleType.READ_NUMBER).build(); @@ -89,17 +85,15 @@ public void testIncompatibleThrottleTypes() throws IOException { @Test public void testNoThrottleReturnsOriginal() throws IOException { TimedQuota tq1 = TimedQuota.newBuilder().setSoftLimit(10) - .setScope(QuotaProtos.QuotaScope.MACHINE) - .setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); + .setScope(QuotaProtos.QuotaScope.MACHINE).setTimeUnit(HBaseProtos.TimeUnit.MINUTES).build(); ThrottleRequest tr1 = ThrottleRequest.newBuilder().setTimedQuota(tq1) .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); ThrottleSettings orig = new ThrottleSettings("joe", null, null, null, tr1); - ThrottleRequest tr2 = ThrottleRequest.newBuilder() - .setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); + ThrottleRequest tr2 = + ThrottleRequest.newBuilder().setType(QuotaProtos.ThrottleType.REQUEST_NUMBER).build(); - assertTrue( - "The same object should be returned by merge, but it wasn't", + assertTrue("The same object should be returned by merge, but it wasn't", orig == orig.merge(new ThrottleSettings("joe", null, null, null, tr2))); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java 
b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java index ae2d4262e647..bfb5d1642b73 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationPeerConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.List; import java.util.Map; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.testclassification.ClientTests; @@ -37,7 +36,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; import org.apache.hbase.thirdparty.com.google.common.collect.Sets; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestReplicationPeerConfig { @ClassRule @@ -53,16 +52,11 @@ public class TestReplicationPeerConfig { @Test public void testClassMethodsAreBuilderStyle() { - /* ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods - * can be chainable together: - * . For example: - * ReplicationPeerConfig htd - * = new ReplicationPeerConfig() - * .setFoo(foo) - * .setBar(bar) - * .setBuz(buz) - * - * This test ensures that all methods starting with "set" returns the declaring object + /* + * ReplicationPeerConfig should have a builder style setup where setXXX/addXXX methods can be + * chainable together: . For example: ReplicationPeerConfig htd = new ReplicationPeerConfig() + * .setFoo(foo) .setBar(bar) .setBuz(buz) This test ensures that all methods starting with "set" + * returns the declaring object */ BuilderStyleTest.assertClassesAreBuilderStyle(ReplicationPeerConfig.class); @@ -72,48 +66,39 @@ public void testClassMethodsAreBuilderStyle() { public void testNeedToReplicateWithReplicatingAll() { // 1. replication_all flag is true, no namespaces and table-cfs config ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .build(); + .setReplicateAllUserTables(true).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 2. replicate_all flag is true, and config in excludedTableCfs // Exclude empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(Maps.newHashMap()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude table B Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); // 3. 
replicate_all flag is true, and config in excludeNamespaces // Exclude empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet()).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace other - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Exclude namespace replication - peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + peerConfig = + new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl().setReplicateAllUserTables(true) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 4. replicate_all flag is true, and config excludeNamespaces and excludedTableCfs both @@ -121,30 +106,24 @@ public void testNeedToReplicateWithReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setExcludeTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .setExcludeTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(tableCfs) - .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(tableCfs) + .setExcludeNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_B)); } @@ -156,48 +135,38 @@ public void testNeedToReplicateWithoutReplicatingAll() { // 1. replication_all flag is false, no namespaces and table-cfs config peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .build(); + .setReplicateAllUserTables(false).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // 2. 
replicate_all flag is false, and only config table-cfs in peer // Set empty table-cfs map peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(Maps.newHashMap()) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(Maps.newHashMap()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set table B tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_B)); // 3. replication_all flag is false, and only config namespace in peer // Set empty namespace set peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet()) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet()).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace other peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); // Set namespace replication peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // 4. 
replicate_all flag is false, and config namespaces and table-cfs both @@ -205,29 +174,23 @@ public void testNeedToReplicateWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs) + .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); // Namespaces config conflicts with table-cfs config tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs) + .setNamespaces(Sets.newHashSet(NAMESPACE_OTHER)).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_B, null); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setNamespaces(Sets.newHashSet(NAMESPACE_REPLICATE)) + .setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); } @@ -236,9 +199,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { Map> excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -246,9 +207,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertFalse(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -256,9 +215,7 @@ public void testNeedToReplicateCFWithReplicatingAll() { excludeTableCfs = Maps.newHashMap(); excludeTableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(true) - .setExcludeTableCFsMap(excludeTableCfs) - .build(); + .setReplicateAllUserTables(true).setExcludeTableCFsMap(excludeTableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -269,9 +226,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { Map> tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, null); ReplicationPeerConfig peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) 
- .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -279,9 +234,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList()); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY2)); @@ -289,9 +242,7 @@ public void testNeedToReplicateCFWithoutReplicatingAll() { tableCfs = Maps.newHashMap(); tableCfs.put(TABLE_A, Lists.newArrayList(Bytes.toString(FAMILY1))); peerConfig = new ReplicationPeerConfig.ReplicationPeerConfigBuilderImpl() - .setReplicateAllUserTables(false) - .setTableCFsMap(tableCfs) - .build(); + .setReplicateAllUserTables(false).setTableCFsMap(tableCfs).build(); assertTrue(peerConfig.needToReplicate(TABLE_A)); assertTrue(peerConfig.needToReplicate(TABLE_A, FAMILY1)); assertFalse(peerConfig.needToReplicate(TABLE_A, FAMILY2)); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java index cf5939031b02..70b3a1a6e12d 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestEncryptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.security.Key; import java.security.KeyException; - import javax.crypto.spec.SecretKeySpec; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -38,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({ClientTests.class, SmallTests.class}) +@Category({ ClientTests.class, SmallTests.class }) public class TestEncryptionUtil { private static final String INVALID_HASH_ALG = "this-hash-algorithm-not-exists hopefully... :)"; @@ -50,7 +49,7 @@ public class TestEncryptionUtil { // There does not seem to be a ready way to test either getKeyFromBytesOrMasterKey // or createEncryptionContext, and the existing code under MobUtils appeared to be - // untested. Not ideal! + // untested. Not ideal! 
@Test public void testKeyWrappingUsingHashAlgDefault() throws Exception { @@ -146,15 +145,14 @@ private void testKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key @@ -168,7 +166,7 @@ private void testKeyWrapping(String hashAlgorithm) throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); // unwrap with an incorrect key try { @@ -183,7 +181,7 @@ private void testWALKeyWrapping(String hashAlgorithm) throws Exception { // set up the key provider for testing to resolve a key for our test subject Configuration conf = new Configuration(); // we don't need HBaseConfiguration for this conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName()); - if(!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { + if (!hashAlgorithm.equals(DEFAULT_HASH_ALGORITHM)) { conf.set(Encryption.CRYPTO_KEY_HASH_ALGORITHM_CONF_KEY, hashAlgorithm); } @@ -204,7 +202,7 @@ private void testWALKeyWrapping(String hashAlgorithm) throws Exception { assertTrue(unwrappedKey instanceof SecretKeySpec); // did we get back what we wrapped? assertTrue("Unwrapped key bytes do not match original", - Bytes.equals(keyBytes, unwrappedKey.getEncoded())); + Bytes.equals(keyBytes, unwrappedKey.getEncoded())); } private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws Exception { @@ -215,8 +213,7 @@ private void testKeyWrappingWithMismatchingAlgorithms(Configuration conf) throws // generate a test key byte[] keyBytes = new byte[AES.KEY_LENGTH]; Bytes.secureRandom(keyBytes); - String algorithm = - conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); + String algorithm = conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES); Key key = new SecretKeySpec(keyBytes, algorithm); // wrap the test key diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java index 538a9b91c3c5..8de94c9e6649 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Strings; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestHBaseSaslRpcClient { @ClassRule @@ -82,14 +82,13 @@ public class TestHBaseSaslRpcClient { private static final Logger LOG = LoggerFactory.getLogger(TestHBaseSaslRpcClient.class); - @Rule public ExpectedException exception = ExpectedException.none(); @Test public void testSaslClientUsesGivenRpcProtection() throws Exception { - Token token = createTokenMockWithCredentials(DEFAULT_USER_NAME, - DEFAULT_USER_PASSWORD); + Token token = + createTokenMockWithCredentials(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD); DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider(); for (SaslUtil.QualityOfProtection qop : SaslUtil.QualityOfProtection.values()) { String negotiatedQop = new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, token, @@ -114,7 +113,7 @@ public void testDigestSaslClientCallbackHandler() throws UnsupportedCallbackExce final RealmCallback realmCallback = mock(RealmCallback.class); // We can provide a realmCallback, but HBase presently does nothing with it. - Callback[] callbackArray = {nameCallback, passwordCallback, realmCallback}; + Callback[] callbackArray = { nameCallback, passwordCallback, realmCallback }; final DigestSaslClientCallbackHandler saslClCallbackHandler = new DigestSaslClientCallbackHandler(token); saslClCallbackHandler.handle(callbackArray); @@ -132,7 +131,7 @@ public void testDigestSaslClientCallbackHandlerWithException() { try { saslClCallbackHandler.handle(new Callback[] { mock(TextOutputCallback.class) }); } catch (UnsupportedCallbackException expEx) { - //expected + // expected } catch (Exception ex) { fail("testDigestSaslClientCallbackHandlerWithException error : " + ex.getMessage()); } @@ -140,7 +139,7 @@ public void testDigestSaslClientCallbackHandlerWithException() { @Test public void testHBaseSaslRpcClientCreation() throws Exception { - //creation kerberos principal check section + // creation kerberos principal check section assertFalse(assertSuccessCreationKerberosPrincipal(null)); assertFalse(assertSuccessCreationKerberosPrincipal("DOMAIN.COM")); assertFalse(assertSuccessCreationKerberosPrincipal("principal/DOMAIN.COM")); @@ -150,22 +149,22 @@ public void testHBaseSaslRpcClientCreation() throws Exception { LOG.warn("Could not create a SASL client with valid Kerberos credential"); } - //creation digest principal check section + // creation digest principal check section assertFalse(assertSuccessCreationDigestPrincipal(null, null)); assertFalse(assertSuccessCreationDigestPrincipal("", "")); assertFalse(assertSuccessCreationDigestPrincipal("", null)); assertFalse(assertSuccessCreationDigestPrincipal(null, "")); assertTrue(assertSuccessCreationDigestPrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //creation simple principal check section + // creation simple principal check section assertFalse(assertSuccessCreationSimplePrincipal("", "")); assertFalse(assertSuccessCreationSimplePrincipal(null, null)); assertFalse(assertSuccessCreationSimplePrincipal(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - //exceptions check section + // exceptions check section assertTrue(assertIOExceptionThenSaslClientIsNull(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); - assertTrue(assertIOExceptionWhenGetStreamsBeforeConnectCall( - DEFAULT_USER_NAME, 
DEFAULT_USER_PASSWORD)); + assertTrue( + assertIOExceptionWhenGetStreamsBeforeConnectCall(DEFAULT_USER_NAME, DEFAULT_USER_PASSWORD)); } @Test @@ -182,9 +181,8 @@ public void testAuthMethodReadWrite() throws IOException { assertAuthMethodWrite(out, AuthMethod.DIGEST); } - private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) - throws IOException { - in.reset(new byte[] {authMethod.code}, 1); + private void assertAuthMethodRead(DataInputBuffer in, AuthMethod authMethod) throws IOException { + in.reset(new byte[] { authMethod.code }, 1); assertEquals(authMethod, AuthMethod.read(in)); } @@ -214,15 +212,15 @@ public SaslClient createClient(Configuration conf, InetAddress serverAddress, try { rpcClient.getInputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet inState = true; } try { rpcClient.getOutputStream(); - } catch(IOException ex) { - //Sasl authentication exchange hasn't completed yet + } catch (IOException ex) { + // Sasl authentication exchange hasn't completed yet outState = true; } @@ -233,14 +231,13 @@ private boolean assertIOExceptionThenSaslClientIsNull(String principal, String p try { DigestSaslClientAuthenticationProvider provider = new DigestSaslClientAuthenticationProvider() { - @Override - public SaslClient createClient(Configuration conf, InetAddress serverAddress, - SecurityInfo securityInfo, - Token token, boolean fallbackAllowed, - Map saslProps) { - return null; - } - }; + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddress, + SecurityInfo securityInfo, Token token, + boolean fallbackAllowed, Map saslProps) { + return null; + } + }; new HBaseSaslRpcClient(HBaseConfiguration.create(), provider, createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); @@ -254,7 +251,7 @@ private boolean assertSuccessCreationKerberosPrincipal(String principal) { HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientForKerberos(principal); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -267,7 +264,7 @@ private boolean assertSuccessCreationDigestPrincipal(String principal, String pa new DigestSaslClientAuthenticationProvider(), createTokenMockWithCredentials(principal, password), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; @@ -277,22 +274,20 @@ private boolean assertSuccessCreationSimplePrincipal(String principal, String pa HBaseSaslRpcClient rpcClient = null; try { rpcClient = createSaslRpcClientSimple(principal, password); - } catch(Exception ex) { + } catch (Exception ex) { LOG.error(ex.getMessage(), ex); } return rpcClient != null; } - private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) - throws IOException { + private HBaseSaslRpcClient createSaslRpcClientForKerberos(String principal) throws IOException { return new HBaseSaslRpcClient(HBaseConfiguration.create(), new GssSaslClientAuthenticationProvider(), createTokenMock(), Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); } - private Token createTokenMockWithCredentials( - String principal, String password) - throws IOException { + private Token createTokenMockWithCredentials(String principal, + String 
password) throws IOException { Token token = createTokenMock(); if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(password)) { when(token.getIdentifier()).thenReturn(Bytes.toBytes(DEFAULT_USER_NAME)); @@ -314,20 +309,20 @@ private Token createTokenMock() { } @Test(expected = IOException.class) - public void testFailedEvaluateResponse() throws IOException { - //prep mockin the SaslClient + public void testFailedEvaluateResponse() throws IOException { + // prep mockin the SaslClient SimpleSaslClientAuthenticationProvider mockProvider = - Mockito.mock(SimpleSaslClientAuthenticationProvider.class); + Mockito.mock(SimpleSaslClientAuthenticationProvider.class); SaslClient mockClient = Mockito.mock(SaslClient.class); Assert.assertNotNull(mockProvider); Assert.assertNotNull(mockClient); Mockito.when(mockProvider.createClient(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.anyBoolean(), Mockito.any())).thenReturn(mockClient); - HBaseSaslRpcClient rpcClient = new HBaseSaslRpcClient(HBaseConfiguration.create(), - mockProvider, createTokenMock(), - Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); + HBaseSaslRpcClient rpcClient = + new HBaseSaslRpcClient(HBaseConfiguration.create(), mockProvider, createTokenMock(), + Mockito.mock(InetAddress.class), Mockito.mock(SecurityInfo.class), false); - //simulate getting an error from a failed saslServer.evaluateResponse + // simulate getting an error from a failed saslServer.evaluateResponse DataOutputBuffer errorBuffer = new DataOutputBuffer(); errorBuffer.writeInt(SaslStatus.ERROR.state); WritableUtils.writeString(errorBuffer, IOException.class.getName()); @@ -337,7 +332,7 @@ mockProvider, createTokenMock(), in.reset(errorBuffer.getData(), 0, errorBuffer.getLength()); DataOutputBuffer out = new DataOutputBuffer(); - //simulate that authentication exchange has completed quickly after sending the token + // simulate that authentication exchange has completed quickly after sending the token Mockito.when(mockClient.isComplete()).thenReturn(true); rpcClient.saslConnect(in, out); } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java index 36f29dec240e..2b5df2734da4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.ExpectedException; -@Category({SecurityTests.class, SmallTests.class}) +@Category({ SecurityTests.class, SmallTests.class }) public class TestSaslUtil { @ClassRule diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java index eff3b5f8dd0a..b684ff7f8adc 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestDefaultProviderSelector.java @@ -23,7 +23,6 @@ import java.util.Collections; import java.util.HashSet; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -32,7 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestDefaultProviderSelector { @ClassRule @@ -40,6 +39,7 @@ public class TestDefaultProviderSelector { HBaseClassTestRule.forClass(TestDefaultProviderSelector.class); BuiltInProviderSelector selector; + @Before public void setup() { selector = new BuiltInProviderSelector(); @@ -70,9 +70,9 @@ public void testDuplicateProviders() { @Test public void testExpectedProviders() { - HashSet providers = new HashSet<>(Arrays.asList( - new SimpleSaslClientAuthenticationProvider(), new GssSaslClientAuthenticationProvider(), - new DigestSaslClientAuthenticationProvider())); + HashSet providers = + new HashSet<>(Arrays.asList(new SimpleSaslClientAuthenticationProvider(), + new GssSaslClientAuthenticationProvider(), new DigestSaslClientAuthenticationProvider())); selector.configure(new Configuration(false), providers); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java index 2b399593e7c1..37f6e9df3852 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/provider/TestSaslClientAuthenticationProviders.java @@ -25,9 +25,7 @@ import java.net.InetAddress; import java.util.HashMap; import java.util.Map; - import javax.security.sasl.SaslClient; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -44,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.UserInformation; -@Category({SmallTests.class, SecurityTests.class}) +@Category({ SmallTests.class, SecurityTests.class }) public class TestSaslClientAuthenticationProviders { @ClassRule @@ -53,7 +51,7 @@ public class TestSaslClientAuthenticationProviders { @Test public void testCannotAddTheSameProviderTwice() { - HashMap registeredProviders = new HashMap<>(); + HashMap registeredProviders = new HashMap<>(); SaslClientAuthenticationProvider p1 = new SimpleSaslClientAuthenticationProvider(); SaslClientAuthenticationProvider p2 = new SimpleSaslClientAuthenticationProvider(); @@ -62,10 +60,11 @@ public void 
testCannotAddTheSameProviderTwice() { try { SaslClientAuthenticationProviders.addProviderIfNotExists(p2, registeredProviders); - } catch (RuntimeException e) {} + } catch (RuntimeException e) { + } assertSame("Expected the original provider to be present", p1, - registeredProviders.entrySet().iterator().next().getValue()); + registeredProviders.entrySet().iterator().next().getValue()); } @Test @@ -89,58 +88,66 @@ public void testInstanceIsCached() { public void testDifferentConflictingImplementationsFail() { Configuration conf = HBaseConfiguration.create(); conf.setStrings(SaslClientAuthenticationProviders.EXTRA_PROVIDERS_KEY, - ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); + ConflictingProvider1.class.getName(), ConflictingProvider2.class.getName()); SaslClientAuthenticationProviders.getInstance(conf); } static class ConflictingProvider1 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD1 = new SaslAuthMethod( - "FOO", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD1 = + new SaslAuthMethod("FOO", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider1() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD1; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } static class ConflictingProvider2 implements SaslClientAuthenticationProvider { - static final SaslAuthMethod METHOD2 = new SaslAuthMethod( - "BAR", (byte)12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); + static final SaslAuthMethod METHOD2 = + new SaslAuthMethod("BAR", (byte) 12, "DIGEST-MD5", AuthenticationMethod.SIMPLE); public ConflictingProvider2() { } - @Override public SaslAuthMethod getSaslAuthMethod() { + @Override + public SaslAuthMethod getSaslAuthMethod() { return METHOD2; } - @Override public String getTokenKind() { + @Override + public String getTokenKind() { return null; } - @Override public SaslClient createClient(Configuration conf, InetAddress serverAddr, + @Override + public SaslClient createClient(Configuration conf, InetAddress serverAddr, SecurityInfo securityInfo, Token token, boolean fallbackAllowed, Map saslProps) throws IOException { return null; } - @Override public UserInformation getUserInfo(User user) { + @Override + public UserInformation getUserInfo(User user) { return null; } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java index 50db3a99d22a..8ce30892dec0 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/token/TestClientTokenUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,9 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.io.Closeables; + import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; @Category(SmallTests.class) @@ -40,7 +42,7 @@ public class TestClientTokenUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestClientTokenUtil.class); + HBaseClassTestRule.forClass(TestClientTokenUtil.class); private URLClassLoader cl; @@ -66,7 +68,7 @@ public void testObtainToken() throws Exception { shouldInjectFault.set(null, injected); try { - ClientTokenUtil.obtainToken((Connection)null); + ClientTokenUtil.obtainToken((Connection) null); fail("Should have injected exception."); } catch (IOException e) { Throwable t = e; @@ -83,7 +85,7 @@ public void testObtainToken() throws Exception { } Boolean loaded = (Boolean) cl.loadClass(ProtobufUtil.class.getCanonicalName()) - .getDeclaredMethod("isClassLoaderLoaded").invoke(null); + .getDeclaredMethod("isClassLoaderLoaded").invoke(null); assertFalse("Should not have loaded DynamicClassLoader", loaded); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java index 317dff9efebc..7c4166c59b77 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/shaded/protobuf/TestProtobufUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -72,7 +72,8 @@ public class TestProtobufUtil { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestProtobufUtil.class); private static final String TAG_STR = "tag-1"; - private static final byte TAG_TYPE = (byte)10; + private static final byte TAG_TYPE = (byte) 10; + public TestProtobufUtil() { } @@ -93,7 +94,6 @@ public void testException() throws IOException { /** * Test basic Get conversions. - * * @throws IOException if the conversion to a {@link Get} fails */ @Test @@ -126,9 +126,8 @@ public void testGet() throws IOException { /** * Test Delete Mutate conversions. - * * @throws IOException if the conversion to a {@link Delete} or a - * {@link org.apache.hadoop.hbase.client.Mutation} fails + * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @Test public void testDelete() throws IOException { @@ -161,22 +160,18 @@ public void testDelete() throws IOException { // delete always have empty value, // add empty value to the original mutate - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { qualifier.setValue(ByteString.EMPTY); } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.DELETE, delete)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.DELETE, delete)); } /** * Test Put Mutate conversions. 
- * * @throws IOException if the conversion to a {@link Put} or a - * {@link org.apache.hadoop.hbase.client.Mutation} fails + * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @Test public void testPut() throws IOException { @@ -210,22 +205,18 @@ public void testPut() throws IOException { // value level timestamp specified, // add the timestamp to the original mutate long timestamp = put.getTimestamp(); - for (ColumnValue.Builder column: - mutateBuilder.getColumnValueBuilderList()) { - for (QualifierValue.Builder qualifier: - column.getQualifierValueBuilderList()) { + for (ColumnValue.Builder column : mutateBuilder.getColumnValueBuilderList()) { + for (QualifierValue.Builder qualifier : column.getQualifierValueBuilderList()) { if (!qualifier.hasTimestamp()) { qualifier.setTimestamp(timestamp); } } } - assertEquals(mutateBuilder.build(), - ProtobufUtil.toMutation(MutationType.PUT, put)); + assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(MutationType.PUT, put)); } /** * Test basic Scan conversions. - * * @throws IOException if the conversion to a {@link org.apache.hadoop.hbase.client.Scan} fails */ @Test @@ -259,8 +250,7 @@ public void testScan() throws IOException { scanBuilder.setIncludeStopRow(false); ClientProtos.Scan expectedProto = scanBuilder.build(); - ClientProtos.Scan actualProto = ProtobufUtil.toScan( - ProtobufUtil.toScan(expectedProto)); + ClientProtos.Scan actualProto = ProtobufUtil.toScan(ProtobufUtil.toScan(expectedProto)); assertEquals(expectedProto, actualProto); } @@ -281,17 +271,15 @@ public void testToCell() { dbb.put(arr); ByteBufferKeyValue offheapKV = new ByteBufferKeyValue(dbb, kv1.getLength(), kv2.getLength()); CellProtos.Cell cell = ProtobufUtil.toCell(offheapKV, false); - Cell newOffheapKV = - ProtobufUtil.toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, - false); + Cell newOffheapKV = ProtobufUtil + .toCell(ExtendedCellBuilderFactory.create(CellBuilderType.SHALLOW_COPY), cell, false); assertTrue(CellComparatorImpl.COMPARATOR.compare(offheapKV, newOffheapKV) == 0); } /** * Test Increment Mutate conversions. - * * @throws IOException if converting to an {@link Increment} or - * {@link org.apache.hadoop.hbase.client.Mutation} fails + * {@link org.apache.hadoop.hbase.client.Mutation} fails */ @Test public void testIncrement() throws IOException { @@ -334,23 +322,20 @@ private MutationProto getIncrementMutation(Long timestamp) { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. + * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. */ @Test public void testIncrementNoTimestamp() throws IOException { MutationProto mutation = getIncrementMutation(null); Increment increment = ProtobufUtil.toIncrement(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, increment.getTimestamp()); - increment.getFamilyCellMap().values() - .forEach(cells -> - cells.forEach(cell -> - assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + increment.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } /** * Test Append Mutate conversions. - * * @throws IOException if converting to an {@link Append} fails */ @Test @@ -373,15 +358,16 @@ public void testAppend() throws IOException { } /** - * Older clients may not send along a timestamp in the MutationProto. Check that we - * default correctly. 
+ * Older clients may not send along a timestamp in the MutationProto. Check that we default + * correctly. */ @Test public void testAppendNoTimestamp() throws IOException { MutationProto mutation = getAppendMutation(null); Append append = ProtobufUtil.toAppend(mutation, null); assertEquals(HConstants.LATEST_TIMESTAMP, append.getTimestamp()); - append.getFamilyCellMap().values().forEach(cells -> cells.forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); + append.getFamilyCellMap().values().forEach(cells -> cells + .forEach(cell -> assertEquals(HConstants.LATEST_TIMESTAMP, cell.getTimestamp()))); } private MutationProto getAppendMutation(Long timestamp) { @@ -425,8 +411,8 @@ private static ProcedureProtos.Procedure createProcedure(long procId) { private static LockServiceProtos.LockedResource createLockedResource( LockServiceProtos.LockedResourceType resourceType, String resourceName, - LockServiceProtos.LockType lockType, - ProcedureProtos.Procedure exclusiveLockOwnerProcedure, int sharedLockCount) { + LockServiceProtos.LockType lockType, ProcedureProtos.Procedure exclusiveLockOwnerProcedure, + int sharedLockCount) { LockServiceProtos.LockedResource.Builder build = LockServiceProtos.LockedResource.newBuilder(); build.setResourceType(resourceType); build.setResourceName(resourceName); @@ -448,94 +434,70 @@ public void testProcedureInfo() { ProcedureProtos.Procedure procedure = builder.build(); String procJson = ProtobufUtil.toProcedureJson(Lists.newArrayList(procedure)); - assertEquals("[{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"1\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"," - + "\"stateMessage\":[{\"value\":\"QQ==\"}]" - + "}]", procJson); + assertEquals("[{" + "\"className\":\"java.lang.Object\"," + "\"procId\":\"1\"," + + "\"submittedTime\":\"0\"," + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"," + + "\"stateMessage\":[{\"value\":\"QQ==\"}]" + "}]", + procJson); } @Test public void testServerLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.SERVER, "server", - LockServiceProtos.LockType.SHARED, null, 2); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.SERVER, "server", + LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"SERVER\"," - + "\"resourceName\":\"server\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"SERVER\"," + "\"resourceName\":\"server\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", + lockJson); } @Test public void testNamespaceLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.NAMESPACE, "ns", - LockServiceProtos.LockType.EXCLUSIVE, createProcedure(2), 0); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.NAMESPACE, "ns", + LockServiceProtos.LockType.EXCLUSIVE, createProcedure(2), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"NAMESPACE\"," - + "\"resourceName\":\"ns\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"2\"," - + "\"submittedTime\":\"0\"," - + 
"\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"NAMESPACE\"," + "\"resourceName\":\"ns\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"2\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + + "}]", + lockJson); } @Test public void testTableLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.TABLE, "table", - LockServiceProtos.LockType.SHARED, null, 2); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.TABLE, "table", + LockServiceProtos.LockType.SHARED, null, 2); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"TABLE\"," - + "\"resourceName\":\"table\"," - + "\"lockType\":\"SHARED\"," - + "\"sharedLockCount\":2" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"TABLE\"," + "\"resourceName\":\"table\"," + + "\"lockType\":\"SHARED\"," + "\"sharedLockCount\":2" + "}]", + lockJson); } @Test public void testRegionLockInfo() { - LockServiceProtos.LockedResource resource = createLockedResource( - LockServiceProtos.LockedResourceType.REGION, "region", - LockServiceProtos.LockType.EXCLUSIVE, createProcedure(3), 0); + LockServiceProtos.LockedResource resource = + createLockedResource(LockServiceProtos.LockedResourceType.REGION, "region", + LockServiceProtos.LockType.EXCLUSIVE, createProcedure(3), 0); String lockJson = ProtobufUtil.toLockJson(Lists.newArrayList(resource)); - assertEquals("[{" - + "\"resourceType\":\"REGION\"," - + "\"resourceName\":\"region\"," - + "\"lockType\":\"EXCLUSIVE\"," - + "\"exclusiveLockOwnerProcedure\":{" - + "\"className\":\"java.lang.Object\"," - + "\"procId\":\"3\"," - + "\"submittedTime\":\"0\"," - + "\"state\":\"RUNNABLE\"," - + "\"lastUpdate\":\"0\"" - + "}," - + "\"sharedLockCount\":0" - + "}]", lockJson); + assertEquals("[{" + "\"resourceType\":\"REGION\"," + "\"resourceName\":\"region\"," + + "\"lockType\":\"EXCLUSIVE\"," + "\"exclusiveLockOwnerProcedure\":{" + + "\"className\":\"java.lang.Object\"," + "\"procId\":\"3\"," + "\"submittedTime\":\"0\"," + + "\"state\":\"RUNNABLE\"," + "\"lastUpdate\":\"0\"" + "}," + "\"sharedLockCount\":0" + + "}]", + lockJson); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to true. 
*/ @Test public void testCellConversionWithTags() { @@ -546,7 +508,7 @@ public void testCellConversionWithTags() { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(1, decodedTags.size()); + assertEquals(1, decodedTags.size()); Tag decodedTag = decodedTags.get(0); assertEquals(TAG_TYPE, decodedTag.getType()); assertEquals(TAG_STR, Tag.getValueAsString(decodedTag)); @@ -566,14 +528,14 @@ private Cell getCellWithTags() { private Cell getCellFromProtoResult(CellProtos.Cell protoCell, boolean decodeTags) { ExtendedCellBuilder decodedBuilder = - ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); + ExtendedCellBuilderFactory.create(CellBuilderType.DEEP_COPY); return ProtobufUtil.toCell(decodedBuilder, protoCell, decodeTags); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encode/decode tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encode/decode tags is set to false. */ @Test public void testCellConversionWithoutTags() { @@ -583,14 +545,13 @@ public void testCellConversionWithoutTags() { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to false - * and decoding of tags is set to true. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to false and decoding of tags is set to true. */ @Test public void testTagEncodeFalseDecodeTrue() { @@ -600,14 +561,13 @@ public void testTagEncodeFalseDecodeTrue() { Cell decodedCell = getCellFromProtoResult(protoCell, true); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } /** * Test {@link ProtobufUtil#toCell(Cell, boolean)} and - * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion - * methods when it contains tags and encoding of tags is set to true - * and decoding of tags is set to false. + * {@link ProtobufUtil#toCell(ExtendedCellBuilder, CellProtos.Cell, boolean)} conversion methods + * when it contains tags and encoding of tags is set to true and decoding of tags is set to false. 
*/ @Test public void testTagEncodeTrueDecodeFalse() { @@ -617,6 +577,6 @@ public void testTagEncodeTrueDecodeFalse() { Cell decodedCell = getCellFromProtoResult(protoCell, false); List decodedTags = PrivateCellUtil.getTags(decodedCell); - assertEquals(0, decodedTags.size()); + assertEquals(0, decodedTags.size()); } } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java index 808e245062a1..d2d68a4ad375 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/BuilderStyleTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,32 +27,17 @@ import java.util.Set; /** - * Utility class to check whether a given class conforms to builder-style: - * Foo foo = - * new Foo() - * .setBar(bar) - * .setBaz(baz) + * Utility class to check whether a given class conforms to builder-style: Foo foo = new Foo() + * .setBar(bar) .setBaz(baz) */ public final class BuilderStyleTest { - private BuilderStyleTest() {} + private BuilderStyleTest() { + } /* * If a base class Foo declares a method setFoo() returning Foo, then the subclass should - * re-declare the methods overriding the return class with the subclass: - * - * class Foo { - * Foo setFoo() { - * .. - * return this; - * } - * } - * - * class Bar { - * Bar setFoo() { - * return (Bar) super.setFoo(); - * } - * } - * + * re-declare the methods overriding the return class with the subclass: class Foo { Foo setFoo() + * { .. return this; } } class Bar { Bar setFoo() { return (Bar) super.setFoo(); } } */ @SuppressWarnings("rawtypes") public static void assertClassesAreBuilderStyle(Class... classes) { @@ -66,13 +51,13 @@ public static void assertClassesAreBuilderStyle(Class... classes) { } Class ret = method.getReturnType(); if (method.getName().startsWith("set") || method.getName().startsWith("add")) { - System.out.println(" " + clazz.getSimpleName() + "." + method.getName() + "() : " - + ret.getSimpleName()); + System.out.println( + " " + clazz.getSimpleName() + "." + method.getName() + "() : " + ret.getSimpleName()); // because of subclass / super class method overrides, we group the methods fitting the // same signatures because we get two method definitions from java reflection: // Mutation.setDurability() : Mutation - // Delete.setDurability() : Mutation + // Delete.setDurability() : Mutation // Delete.setDurability() : Delete String sig = method.getName(); for (Class param : method.getParameterTypes()) { diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java index 314cae9e175b..dd335f9b1b41 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/PoolMapTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java index 2fd73caea46a..a7ae74922635 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestRoundRobinPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java index 2f497c6fdfb5..50bb5ebd2d49 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/util/TestThreadLocalPoolMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java index a8b7644c52af..a5177abd6f88 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZNodePaths.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml index e4f1a03744f8..2d1b33ae15ea 100644 --- a/hbase-common/pom.xml +++ b/hbase-common/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,113 +31,6 @@ Apache HBase - Common Common functionality for HBase - - - - src/main/resources/ - - hbase-default.xml - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - - maven-assembly-plugin - - true - - - - maven-antrun-plugin - - - process-resources - - - - - - - run - - - - - generate-Version-information - generate-sources - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - versionInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - hbase-default.xml - - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -260,6 +153,112 @@ + + + + src/main/resources/ + + hbase-default.xml + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + + maven-assembly-plugin + + true + + + + maven-antrun-plugin + + + + run + + process-resources + + + + + + + + + generate-Version-information + + run + + generate-sources + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + versionInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + hbase-default.xml + + + + + net.revelc.code + warbucks-maven-plugin + + + + @@ -272,10 +271,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -309,14 +308,14 @@ - hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -334,10 +333,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. 
hbase-compression-aircompressor Apache HBase - Compression - Aircompressor Pure Java compression support using Aircompressor codecs - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -165,6 +131,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java index c448f58dbf55..de687fb1f0d0 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopCompressor.java @@ -1,24 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Compressor; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -26,8 +27,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Compressor; - /** * Hadoop compressor glue for aircompressor compressors. 
*/ @@ -165,7 +164,7 @@ public void reinit(Configuration conf) { public void reset() { LOG.trace("reset"); try { - compressor = (T)(compressor.getClass().getDeclaredConstructor().newInstance()); + compressor = (T) (compressor.getClass().getDeclaredConstructor().newInstance()); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java index f5f5b83ab300..b0e828c9db83 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/HadoopDecompressor.java @@ -1,31 +1,30 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.Decompressor; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import io.airlift.compress.Decompressor; - /** * Hadoop decompressor glue for aircompressor decompressors. 
*/ @@ -104,7 +103,7 @@ public boolean needsDictionary() { public void reset() { LOG.trace("reset"); try { - decompressor = (T)(decompressor.getClass().getDeclaredConstructor().newInstance()); + decompressor = (T) (decompressor.getClass().getDeclaredConstructor().newInstance()); } catch (Exception e) { throw new RuntimeException(e); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java index c1766dc0456a..6bb7b1c721a7 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/Lz4Codec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lz4.Lz4Compressor; +import io.airlift.compress.lz4.Lz4Decompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -32,9 +34,6 @@ import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lz4.Lz4Compressor; -import io.airlift.compress.lz4.Lz4Decompressor; - /** * Hadoop Lz4 codec implemented with aircompressor. *

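The HadoopCompressor/HadoopDecompressor classes touched above are glue that lets the pure-Java aircompressor codecs sit behind Hadoop's standard CompressionCodec interface. As a point of reference only (this is not part of the patch), a minimal round-trip sketch through the Lz4Codec from this module might look like the following; it assumes only the class names visible in the hunks above plus stock Hadoop stream APIs.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.compress.aircompressor.Lz4Codec;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;

public class Lz4RoundTripSketch {
  public static void main(String[] args) throws Exception {
    // The codec is Configurable; hand it a Configuration before use.
    Lz4Codec codec = new Lz4Codec();
    codec.setConf(new Configuration());

    byte[] input = "hello aircompressor".getBytes(StandardCharsets.UTF_8);

    // Compress through the Hadoop CompressionOutputStream glue.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (CompressionOutputStream out = codec.createOutputStream(compressed)) {
      out.write(input);
    }

    // Decompress back and print the restored payload to verify the round trip.
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (CompressionInputStream in =
        codec.createInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
      IOUtils.copyBytes(in, restored, 4096, false);
    }
    System.out.println(new String(restored.toByteArray(), StandardCharsets.UTF_8));
  }
}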
    diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java index 3e5ab049e954..31f1bda30e6d 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/LzoCodec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.lzo.LzoCompressor; +import io.airlift.compress.lzo.LzoDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -32,9 +34,6 @@ import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.lzo.LzoCompressor; -import io.airlift.compress.lzo.LzoDecompressor; - /** * Hadoop Lzo codec implemented with aircompressor. *

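The LzoCodec above is one of the pure-Java implementations this module provides (per the module description, "Pure Java compression support using Aircompressor codecs"), so it can stand in for a native-backed LZO codec. A hedged sketch of selecting it programmatically is below; the "hbase.io.compress.lzo.codec" property name is an assumption about the pluggable-codec configuration this module targets and should be confirmed against Compression.java or the reference guide for your HBase version.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.io.compress.aircompressor.LzoCodec;

public class SelectLzoImplementationSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key name: point the LZO algorithm at the pure-Java aircompressor
    // implementation instead of a native-backed codec.
    conf.set("hbase.io.compress.lzo.codec", LzoCodec.class.getName());
    System.out.println(conf.get("hbase.io.compress.lzo.codec"));
  }
}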
    diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java index e325b8b625aa..2f066dd34375 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/SnappyCodec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.snappy.SnappyCompressor; +import io.airlift.compress.snappy.SnappyDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -32,9 +34,6 @@ import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.snappy.SnappyCompressor; -import io.airlift.compress.snappy.SnappyDecompressor; - /** * Hadoop snappy codec implemented with aircompressor. *

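Codecs like the SnappyCodec above are ultimately exercised when a column family is declared with the corresponding compression algorithm, as the HFile compression tests further down illustrate with doTest(conf, path, Compression.Algorithm.SNAPPY). A minimal sketch of such a column-family definition follows; it is not part of this patch, "demo" and "cf" are placeholder names, and which concrete Snappy implementation backs the algorithm at runtime remains a separate, configuration-driven choice.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;

public class SnappyFamilySketch {
  public static void main(String[] args) {
    // Declare a table whose "cf" family stores its HFiles SNAPPY-compressed.
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
      .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setCompressionType(Compression.Algorithm.SNAPPY)
        .build())
      .build();
    System.out.println(table);
  }
}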
    diff --git a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java index a25943fbb483..0fc35b46583e 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/main/java/org/apache/hadoop/hbase/io/compress/aircompressor/ZstdCodec.java @@ -1,25 +1,27 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.aircompressor; +import io.airlift.compress.zstd.ZstdCompressor; +import io.airlift.compress.zstd.ZstdDecompressor; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -32,21 +34,17 @@ import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; -import io.airlift.compress.zstd.ZstdCompressor; -import io.airlift.compress.zstd.ZstdDecompressor; - /** * Hadoop codec implementation for Zstandard, implemented with aircompressor. *

    - * Unlike the other codecs this one should be considered as under development and unstable - * (as in changing), reflecting the status of aircompressor's zstandard implementation. + * Unlike the other codecs this one should be considered as under development and unstable (as in + * changing), reflecting the status of aircompressor's zstandard implementation. *

    - * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. - * There are issues with both framing and limitations of the aircompressor zstandard - * compressor. This codec can be used as an alternative to the native codec, if the native - * codec cannot be made available and/or an eventual migration will never be necessary - * (i.e. this codec's performance meets anticipated requirements). Once you begin using this - * alternative you will be locked into it. + * NOTE: This codec is NOT data format compatible with the Hadoop native zstandard codec. There are + * issues with both framing and limitations of the aircompressor zstandard compressor. This codec + * can be used as an alternative to the native codec, if the native codec cannot be made available + * and/or an eventual migration will never be necessary (i.e. this codec's performance meets + * anticipated requirements). Once you begin using this alternative you will be locked into it. */ @InterfaceAudience.Private public class ZstdCodec implements Configurable, CompressionCodec { diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java index 5ddee513cd6b..aa60f279170a 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZ4); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java index 143db468e58b..d3d23823c9f2 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzo extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZO); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java index e9b08cb937f5..eb9e6f11e3bd 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.SNAPPY); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java index c3a52d808aa8..ba10b56ada47 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.ZSTD); } diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java index db1cc7214fd1..45f7c9058d6a 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
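The TestHFileCompression* hunks above each write an HFile with Compression.Algorithm.LZ4, LZO, SNAPPY or ZSTD and read it back. In a deployment the algorithm is selected per column family; the sketch below shows that selection through the client API. The "hbase.io.compress.zstd.codec" property used here to point the ZSTD algorithm at the pure-Java implementation is an assumption about how these pluggable codec modules are wired in, and it would normally be set in the server-side hbase-site.xml rather than in client code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateCompressedTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed property name: map the ZSTD algorithm to the aircompressor codec.
        conf.set("hbase.io.compress.zstd.codec",
          "org.apache.hadoop.hbase.io.compress.aircompressor.ZstdCodec");

        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
          .setCompressionType(Compression.Algorithm.ZSTD) // compression is a per-family setting
          .build();
        TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
          .setColumnFamily(cf)
          .build();

        try (Connection connection = ConnectionFactory.createConnection(conf);
            Admin admin = connection.getAdmin()) {
          admin.createTable(table);
        }
      }
    }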
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testLz4CodecSmall() throws Exception { public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // high compressability + codecLargeTest(new Lz4Codec(), 10); // high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java index bd1b75aecc1b..59f37c861643 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestLzoCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testLzoCodecSmall() throws Exception { public void testLzoCodecLarge() throws Exception { codecLargeTest(new LzoCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new LzoCodec(), 2); - codecLargeTest(new LzoCodec(), 10); // very high compressability + codecLargeTest(new LzoCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java index 98e628121c9b..318b42113027 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testSnappyCodecSmall() throws Exception { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java index 23d7777f07c7..710bd5750428 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java index 997d6873c617..f3da86794ee0 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionLzo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java index 924e46a77eee..e1bde41687ba 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java index 0de6de2b027c..4fff65da43ed 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java index 707fee2aded4..9b1e003e2bbc 100644 --- a/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-aircompressor/src/test/java/org/apache/hadoop/hbase/io/compress/aircompressor/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
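The TestWALCompression* headers touched above belong to tests that run these same codecs underneath WAL value compression. As a heavily hedged reminder of how that feature is switched on: the first property in the sketch below is the long-standing WAL (key) compression flag; the two value-compression property names are assumptions that should be checked against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalCompressionSettings {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Dictionary compression of WAL entry keys (well-established setting).
        conf.setBoolean("hbase.regionserver.wal.enablecompression", true);
        // Assumed property names for WAL value compression, which is what the
        // TestWALCompression* classes exercise with each pluggable codec.
        conf.setBoolean("hbase.regionserver.wal.value.enablecompression", true);
        conf.set("hbase.regionserver.wal.value.compression.type", "snappy");
        System.out.println(conf.get("hbase.regionserver.wal.value.compression.type"));
      }
    }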
*/ package org.apache.hadoop.hbase.io.compress.aircompressor; @@ -39,7 +40,7 @@ public void testZstdCodecSmall() throws Exception { public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-lz4/pom.xml b/hbase-compression/hbase-compression-lz4/pom.xml index c891a6a2a7c1..d4720a37f167 100644 --- a/hbase-compression/hbase-compression-lz4/pom.xml +++ b/hbase-compression/hbase-compression-lz4/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. hbase-compression-lz4 Apache HBase - Compression - LZ4 Pure Java compression support using lz4-java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -154,6 +131,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java index a218954b6f2c..e6420e9f8b3d 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Codec.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java index 71b5164f116e..649cf4908b04 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Compressor.java @@ -1,24 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; - +import net.jpountz.lz4.LZ4Compressor; +import net.jpountz.lz4.LZ4Factory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -27,9 +29,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Compressor; -import net.jpountz.lz4.LZ4Factory; - /** * Hadoop compressor glue for lz4-java. */ diff --git a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java index efb8c846d923..b897423f761e 100644 --- a/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java +++ b/hbase-compression/hbase-compression-lz4/src/main/java/org/apache/hadoop/hbase/io/compress/lz4/Lz4Decompressor.java @@ -1,33 +1,32 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.lz4; import java.io.IOException; import java.nio.ByteBuffer; - +import net.jpountz.lz4.LZ4Factory; +import net.jpountz.lz4.LZ4SafeDecompressor; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import net.jpountz.lz4.LZ4Factory; -import net.jpountz.lz4.LZ4SafeDecompressor; - /** * Hadoop decompressor glue for lz4-java. */ diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java index 8f61829f59ad..d8d0f5c2730d 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestHFileCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
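The Lz4Compressor and Lz4Decompressor hunks above only regroup the net.jpountz imports; the glue itself delegates to lz4-java. For reference, a minimal sketch of the underlying lz4-java calls (the HBase glue adds Hadoop buffer management on top, so this is not its actual code path):

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import net.jpountz.lz4.LZ4Compressor;
    import net.jpountz.lz4.LZ4Factory;
    import net.jpountz.lz4.LZ4SafeDecompressor;

    public class Lz4JavaDirect {
      public static void main(String[] args) {
        LZ4Factory factory = LZ4Factory.fastestInstance(); // JNI if present, else pure Java
        LZ4Compressor compressor = factory.fastCompressor();
        LZ4SafeDecompressor decompressor = factory.safeDecompressor();

        byte[] src = "lz4 lz4 lz4 lz4 lz4 lz4 lz4 lz4".getBytes(StandardCharsets.UTF_8);

        // Compress into a worst-case sized buffer, keeping the actual length.
        byte[] dst = new byte[compressor.maxCompressedLength(src.length)];
        int compressedLen = compressor.compress(src, 0, src.length, dst, 0, dst.length);

        // The "safe" decompressor only needs a large enough destination buffer.
        byte[] restored = new byte[src.length];
        int restoredLen = decompressor.decompress(dst, 0, compressedLen, restored, 0);

        System.out.println(restoredLen == src.length
          && Arrays.equals(src, Arrays.copyOf(restored, restoredLen))); // expect true
      }
    }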
See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLz4 extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZ4); } diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java index 0c237e105bac..ff9cec67285e 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestLz4Codec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.lz4; @@ -38,8 +39,8 @@ public void testLz4CodecSmall() throws Exception { @Test public void testLz4CodecLarge() throws Exception { codecLargeTest(new Lz4Codec(), 1.1); // poor compressability, expansion with this codec - codecLargeTest(new Lz4Codec(), 2); - codecLargeTest(new Lz4Codec(), 10); // very high compressability + codecLargeTest(new Lz4Codec(), 2); + codecLargeTest(new Lz4Codec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java index fdf9b0a9cc14..734be0c2851f 100644 --- a/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java +++ b/hbase-compression/hbase-compression-lz4/src/test/java/org/apache/hadoop/hbase/io/compress/lz4/TestWALCompressionLz4.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-snappy/pom.xml b/hbase-compression/hbase-compression-snappy/pom.xml index 069896dbf163..8da6b79aa17a 100644 --- a/hbase-compression/hbase-compression-snappy/pom.xml +++ b/hbase-compression/hbase-compression-snappy/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. hbase-compression-snappy Apache HBase - Compression - Snappy Pure Java compression support using Xerial Snappy - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -154,6 +131,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java index e7c62c507c16..6e4b951d294f 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCodec.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java index fd9994265086..3493a804f38e 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyCompressor.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -26,7 +26,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.xerial.snappy.Snappy; /** diff --git a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java index e9119216168f..0987e550b74f 100644 --- a/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java +++ b/hbase-compression/hbase-compression-snappy/src/main/java/org/apache/hadoop/hbase/io/compress/xerial/SnappyDecompressor.java @@ -1,30 +1,29 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xerial; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - import org.xerial.snappy.Snappy; /** diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java index 0343e8b0a5a2..4dbf77675128 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestHFileCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionSnappy extends HFileTestBase { @ClassRule @@ -50,7 +50,7 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.SNAPPY); } diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java index e882d79df52c..64e31ba47e6a 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestSnappyCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
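The xerial SnappyCompressor and SnappyDecompressor hunks likewise only move the org.xerial.snappy.Snappy import. The glue operates on ByteBuffers with its own block framing, so the following one-shot array calls are just an orientation sketch of the snappy-java API it builds on:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;
    import org.xerial.snappy.Snappy;

    public class XerialSnappyDirect {
      public static void main(String[] args) throws Exception {
        byte[] src = "snappy snappy snappy snappy".getBytes(StandardCharsets.UTF_8);

        byte[] compressed = Snappy.compress(src);
        byte[] restored = Snappy.uncompress(compressed);

        System.out.println(Snappy.isValidCompressedBuffer(compressed)); // expect true
        System.out.println(Arrays.equals(src, restored));               // expect true
      }
    }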
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xerial; @@ -39,7 +40,7 @@ public void testSnappyCodecSmall() throws Exception { public void testSnappyCodecLarge() throws Exception { codecLargeTest(new SnappyCodec(), 1.1); // poor compressability, expansion with this codec codecLargeTest(new SnappyCodec(), 2); - codecLargeTest(new SnappyCodec(), 10); // very high compressability + codecLargeTest(new SnappyCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java index ba59b6525340..5afd5b90811b 100644 --- a/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java +++ b/hbase-compression/hbase-compression-snappy/src/test/java/org/apache/hadoop/hbase/io/compress/xerial/TestWALCompressionSnappy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-xz/pom.xml b/hbase-compression/hbase-compression-xz/pom.xml index 23452af81836..24229f6af3ec 100644 --- a/hbase-compression/hbase-compression-xz/pom.xml +++ b/hbase-compression/hbase-compression-xz/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. hbase-compression-xz Apache HBase - Compression - XZ Pure Java compression support using XZ for Java - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -138,6 +115,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java index 99f29a2695bd..4e15c1405ed4 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCodec.java @@ -1,25 +1,25 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.compress.BlockCompressorStream; diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java index dd4d9990954c..3551003b2cb3 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaCompressor.java @@ -1,28 +1,28 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xz; import java.io.IOException; import java.nio.BufferOverflowException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.hbase.io.ByteBufferOutputStream; +import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Compressor; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -236,7 +236,7 @@ public void setInput(byte[] b, int off, int len) { // Package private int maxCompressedLength(int len) { - return len + 32 + (len/6); + return len + 32 + (len / 6); } } diff --git a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java index be450b3be162..27a14bcb23e7 100644 --- a/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java +++ b/hbase-compression/hbase-compression-xz/src/main/java/org/apache/hadoop/hbase/io/compress/xz/LzmaDecompressor.java @@ -1,24 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
*/ package org.apache.hadoop.hbase.io.compress.xz; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.hbase.io.ByteBufferInputStream; import org.apache.hadoop.hbase.io.compress.CompressionUtil; import org.apache.hadoop.io.compress.Decompressor; diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java index 617e02dcbf06..bfe6a590b55a 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestHFileCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionLzma extends HFileTestBase { @ClassRule @@ -52,16 +52,16 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.LZMA); } @Test public void testReconfLevels() throws Exception { Path path_1 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".1.hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".1.hfile"); Path path_2 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".2.hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".2.hfile"); conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, 1); doTest(conf, path_1, Compression.Algorithm.LZMA); long len_1 = FS.getFileStatus(path_1).getLen(); diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java index 63978abe838b..d13e80a134ea 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestLzmaCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
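TestHFileCompressionLzma's testReconfLevels above shows the point of the LZMA module's level knob: the same data written under different LzmaCodec.LZMA_LEVEL_KEY settings yields different file sizes. A stripped-down version of that idea, using only the codec's stream API, might look like the sketch below; it assumes LZMA_LEVEL_KEY is publicly accessible and uses a synthetic, highly redundant payload (real data shows a larger spread between levels).

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStream;
    import java.util.Arrays;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.io.compress.xz.LzmaCodec;

    public class LzmaLevels {
      public static void main(String[] args) throws Exception {
        byte[] data = new byte[1 << 20];
        Arrays.fill(data, (byte) 'a'); // illustrative filler only

        for (int level : new int[] { 1, 9 }) {
          Configuration conf = new Configuration();
          conf.setInt(LzmaCodec.LZMA_LEVEL_KEY, level); // same knob the test reconfigures
          LzmaCodec codec = new LzmaCodec();
          codec.setConf(conf);

          ByteArrayOutputStream bos = new ByteArrayOutputStream();
          try (OutputStream out = codec.createOutputStream(bos)) {
            out.write(data);
          }
          System.out.println("level " + level + " -> " + bos.size() + " bytes");
        }
      }
    }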
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.xz; @@ -39,8 +40,8 @@ public void testLzmaCodecSmall() throws Exception { @Test public void testLzmaCodecLarge() throws Exception { codecLargeTest(new LzmaCodec(), 1.1); // poor compressability - codecLargeTest(new LzmaCodec(), 2); - codecLargeTest(new LzmaCodec(), 10); // very high compressability + codecLargeTest(new LzmaCodec(), 2); + codecLargeTest(new LzmaCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java index 89ce68b0600e..396b01402d09 100644 --- a/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java +++ b/hbase-compression/hbase-compression-xz/src/test/java/org/apache/hadoop/hbase/io/compress/xz/TestWALCompressionLzma.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-zstd/pom.xml b/hbase-compression/hbase-compression-zstd/pom.xml index cb416517c561..f3751d298f10 100644 --- a/hbase-compression/hbase-compression-zstd/pom.xml +++ b/hbase-compression/hbase-compression-zstd/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-compression org.apache.hbase + hbase-compression 2.5.0-SNAPSHOT .. hbase-compression-zstd Apache HBase - Compression - ZStandard Pure Java compression support using zstd-jni - - - - - maven-surefire-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - - - - maven-assembly-plugin - - true - - - - - @@ -154,6 +131,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + + + + maven-surefire-plugin + + + net.revelc.code + warbucks-maven-plugin + + + build-with-jdk11 diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java index 07b26d0c4bf0..f7af083ddd88 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -115,8 +116,7 @@ public String getDefaultExtension() { static int getLevel(Configuration conf) { return conf.getInt(ZSTD_LEVEL_KEY, - conf.getInt( - CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, + conf.getInt(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY, CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT)); } @@ -141,10 +141,8 @@ static byte[] getDictionary(final Configuration conf) { // Reference: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md static boolean isDictionary(byte[] dictionary) { - return (dictionary[0] == (byte)0x37 && - dictionary[1] == (byte)0xA4 && - dictionary[2] == (byte)0x30 && - dictionary[3] == (byte)0xEC); + return (dictionary[0] == (byte) 0x37 && dictionary[1] == (byte) 0xA4 + && dictionary[2] == (byte) 0x30 && dictionary[3] == (byte) 0xEC); } static int getDictionaryId(byte[] dictionary) { diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java index deaf7e1ea833..e2c483cbcf18 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdCompressor.java @@ -1,24 +1,26 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictCompress; import java.io.IOException; import java.nio.ByteBuffer; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.compress.CanReinit; import org.apache.hadoop.hbase.io.compress.CompressionUtil; @@ -27,9 +29,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictCompress; - /** * Hadoop compressor glue for zstd-jni. */ diff --git a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java index dfa37db636ae..f7934d9f5500 100644 --- a/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java +++ b/hbase-compression/hbase-compression-zstd/src/main/java/org/apache/hadoop/hbase/io/compress/zstd/ZstdDecompressor.java @@ -1,21 +1,24 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
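(Editorial aside, not part of the patch: the ZstdCodec hunks above only reformat the code that resolves the compression level from ZstdCodec.ZSTD_LEVEL_KEY, falling back to Hadoop's io.compression.codec.zstd setting, and the dictionary magic-number check. As a minimal sketch of how a user might set those keys, assuming an HBase client classpath; the table name, family, and dictionary path below are made up for illustration.)

    // Sketch only (not part of this diff); table, family, and dictionary path are hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.compress.zstd.ZstdCodec;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ZstdConfigSketch {
      public static void main(String[] args) {
        // Codec-level setting read by ZstdCodec.getLevel() from whatever Configuration the
        // codec sees (typically hbase-site.xml); if unset, the Hadoop-wide zstd level applies.
        Configuration conf = HBaseConfiguration.create();
        conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 3);

        // Per-family settings, mirroring TestZstdDictionarySplitMerge later in this patch:
        // ZSTD block compression plus an optional pre-trained dictionary.
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("t"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setCompressionType(Compression.Algorithm.ZSTD)
            .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, "hdfs:///path/to/zstd.dict")
            .build())
          .build();
        System.out.println(td);
      }
    }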
*/ package org.apache.hadoop.hbase.io.compress.zstd; +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdDictDecompress; import java.io.IOException; import java.nio.ByteBuffer; import org.apache.hadoop.conf.Configuration; @@ -25,8 +28,6 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.github.luben.zstd.Zstd; -import com.github.luben.zstd.ZstdDictDecompress; /** * Hadoop decompressor glue for zstd-java. diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java index 55a197b45f66..55b9682d24dc 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestHFileCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({IOTests.class, SmallTests.class}) +@Category({ IOTests.class, SmallTests.class }) public class TestHFileCompressionZstd extends HFileTestBase { @ClassRule @@ -52,16 +52,16 @@ public static void setUpBeforeClass() throws Exception { @Test public void test() throws Exception { Path path = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".hfile"); doTest(conf, path, Compression.Algorithm.ZSTD); } @Test public void testReconfLevels() throws Exception { Path path_1 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".1.hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".1.hfile"); Path path_2 = new Path(TEST_UTIL.getDataTestDir(), - HBaseTestingUtility.getRandomUUID().toString() + ".2.hfile"); + HBaseTestingUtility.getRandomUUID().toString() + ".2.hfile"); conf.setInt(ZstdCodec.ZSTD_LEVEL_KEY, 1); doTest(conf, path_1, Compression.Algorithm.ZSTD); long len_1 = FS.getFileStatus(path_1).getLen(); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java index e75de9b9c466..ffce70943963 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestWALCompressionZstd.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java index bf1c78cbc17f..bdff5da87c9c 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdCodec.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -41,8 +42,8 @@ public void testZstdCodecSmall() throws Exception { @Test public void testZstdCodecLarge() throws Exception { codecLargeTest(new ZstdCodec(), 1.1); // poor compressability - codecLargeTest(new ZstdCodec(), 2); - codecLargeTest(new ZstdCodec(), 10); // very high compressability + codecLargeTest(new ZstdCodec(), 2); + codecLargeTest(new ZstdCodec(), 10); // very high compressability } @Test diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java index 0a17ef997d20..ef3f339160eb 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionary.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.io.compress.zstd; @@ -40,7 +41,7 @@ public class TestZstdDictionary extends CompressionTestBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdDictionary.class); + HBaseClassTestRule.forClass(TestZstdDictionary.class); private static final String DICTIONARY_PATH = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict"; // zstd.test.data compressed with zstd.test.dict at level 3 will produce a result of @@ -53,7 +54,7 @@ public class TestZstdDictionary extends CompressionTestBase { public static void setUp() throws Exception { Configuration conf = new Configuration(); TEST_DATA = DictionaryCache.loadFromResource(conf, - DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024*1024); + DictionaryCache.RESOURCE_SCHEME + "zstd.test.data", /* maxSize */ 1024 * 1024); assertNotNull("Failed to load test data", TEST_DATA); } @@ -76,13 +77,13 @@ public void test() throws Exception { public static void main(String[] args) throws IOException { // Write 1000 1k blocks for training to the specified file // Train with: - // zstd --train -B1024 -o + // zstd --train -B1024 -o if (args.length < 1) { System.err.println("Usage: TestZstdCodec "); System.exit(-1); } final RandomDistribution.DiscreteRNG rng = - new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, 2); + new RandomDistribution.Zipf(new Random(), 0, Byte.MAX_VALUE, 2); final File outFile = new File(args[0]); final byte[] buffer = new byte[1024]; System.out.println("Generating " + outFile); diff --git a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java index dff3848f5603..82851493b915 100644 --- a/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java +++ b/hbase-compression/hbase-compression-zstd/src/test/java/org/apache/hadoop/hbase/io/compress/zstd/TestZstdDictionarySplitMerge.java @@ -53,7 +53,7 @@ public class TestZstdDictionarySplitMerge { @ClassRule public static final 
HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestZstdDictionarySplitMerge.class); + HBaseClassTestRule.forClass(TestZstdDictionarySplitMerge.class); private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); private static Configuration conf; @@ -83,11 +83,10 @@ public void test() throws Exception { final byte[] cfName = Bytes.toBytes("info"); final String dictionaryPath = DictionaryCache.RESOURCE_SCHEME + "zstd.test.dict"; final TableDescriptor td = TableDescriptorBuilder.newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) - .setCompressionType(Compression.Algorithm.ZSTD) - .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath) - .build()) - .build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(cfName) + .setCompressionType(Compression.Algorithm.ZSTD) + .setConfiguration(ZstdCodec.ZSTD_DICTIONARY_KEY, dictionaryPath).build()) + .build(); final Admin admin = TEST_UTIL.getAdmin(); admin.createTable(td, new byte[][] { Bytes.toBytes(1) }); TEST_UTIL.waitTableAvailable(tableName); @@ -108,6 +107,7 @@ public void test() throws Exception { public boolean evaluate() throws Exception { return TEST_UTIL.getMiniHBaseCluster().getRegions(tableName).size() == 3; } + @Override public String explainFailure() throws Exception { return "Split has not finished yet"; @@ -120,7 +120,7 @@ public String explainFailure() throws Exception { RegionInfo regionA = null; RegionInfo regionB = null; - for (RegionInfo region: admin.getRegions(tableName)) { + for (RegionInfo region : admin.getRegions(tableName)) { if (region.getStartKey().length == 0) { regionA = region; } else if (Bytes.equals(region.getStartKey(), Bytes.toBytes(1))) { @@ -129,18 +129,16 @@ public String explainFailure() throws Exception { } assertNotNull(regionA); assertNotNull(regionB); - admin.mergeRegionsAsync(new byte[][] { - regionA.getRegionName(), - regionB.getRegionName() - }, false).get(30, TimeUnit.SECONDS); + admin + .mergeRegionsAsync(new byte[][] { regionA.getRegionName(), regionB.getRegionName() }, false) + .get(30, TimeUnit.SECONDS); assertEquals(2, admin.getRegions(tableName).size()); ServerName expected = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0).getServerName(); assertEquals(expected, TEST_UTIL.getConnection().getRegionLocator(tableName) - .getRegionLocation(Bytes.toBytes(1), true).getServerName()); - try (AsyncConnection asyncConn = - ConnectionFactory.createAsyncConnection(conf).get()) { + .getRegionLocation(Bytes.toBytes(1), true).getServerName()); + try (AsyncConnection asyncConn = ConnectionFactory.createAsyncConnection(conf).get()) { assertEquals(expected, asyncConn.getRegionLocator(tableName) - .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); + .getRegionLocation(Bytes.toBytes(1), true).get().getServerName()); } TEST_UTIL.verifyNumericRows(t, cfName, 0, 100_000, 0); } diff --git a/hbase-compression/pom.xml b/hbase-compression/pom.xml index 4f65df1ef73f..694a4e6703b7 100644 --- a/hbase-compression/pom.xml +++ b/hbase-compression/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-compression + pom Apache HBase - Compression Pure Java compression support parent - pom hbase-compression-aircompressor @@ -80,10 +80,10 @@ spotbugs-maven-plugin - false spotbugs + false ${project.basedir}/../dev-support/spotbugs-exclude.xml diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml index 
83ae8507b9f1..64fd21772646 100644 --- a/hbase-endpoint/pom.xml +++ b/hbase-endpoint/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -33,51 +33,6 @@ true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - ${basedir}/../hbase-protocol/src/main/protobuf - - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -271,6 +226,51 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + ${basedir}/../hbase-protocol/src/main/protobuf + + + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java index 8101654ab83a..827e384fd11f 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationClient.java @@ -24,7 +24,6 @@ import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; - import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; @@ -35,7 +34,6 @@ import java.util.NavigableSet; import java.util.TreeMap; import java.util.concurrent.atomic.AtomicLong; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -58,29 +56,26 @@ import org.slf4j.LoggerFactory; /** - * This client class is for invoking the aggregate functions deployed on the - * Region Server side via the AggregateService. This class will implement the - * supporting functionality for summing/processing the individual results - * obtained from the AggregateService for each region. + * This client class is for invoking the aggregate functions deployed on the Region Server side via + * the AggregateService. This class will implement the supporting functionality for + * summing/processing the individual results obtained from the AggregateService for each region. *

- * This will serve as the client side handler for invoking the aggregate
- * functions.
- * For all aggregate functions,
+ * This will serve as the client side handler for invoking the aggregate functions. For all
+ * aggregate functions,
 * <ul>
 * <li>start row &lt; end row is an essential condition (if they are not
 * {@link HConstants#EMPTY_BYTE_ARRAY})
- * <li>Column family can't be null. In case where multiple families are
- * provided, an IOException will be thrown. An optional column qualifier can
- * also be defined.</li>
- * <li>For methods to find maximum, minimum, sum, rowcount, it returns the
- * parameter type. For average and std, it returns a double value. For row
- * count, it returns a long value.</li>
+ * <li>Column family can't be null. In case where multiple families are provided, an IOException
+ * will be thrown. An optional column qualifier can also be defined.</li>
+ * <li>For methods to find maximum, minimum, sum, rowcount, it returns the parameter type. For
+ * average and std, it returns a double value. For row count, it returns a long value.</li>
 * </ul>
- * <p>Call {@link #close()} when done.
+ * <p>
    + * Call {@link #close()} when done. */ @InterfaceAudience.Public public class AggregationClient implements Closeable { - // TODO: This class is not used. Move to examples? + // TODO: This class is not used. Move to examples? private static final Logger log = LoggerFactory.getLogger(AggregationClient.class); private final Connection connection; @@ -152,18 +147,17 @@ public void close() throws IOException { } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return max val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R max( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) + public R + max(final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return max(table, ci, scan); @@ -171,19 +165,17 @@ public R max( } /** - * It gives the maximum value of a column for a given column family for the - * given range. In case qualifier is null, a max of all values for the given - * family is returned. + * It gives the maximum value of a column for a given column family for the given range. In case + * qualifier is null, a max of all values for the given family is returned. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return max val <> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. 
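(Editorial aside, not part of the patch: the AggregationClient Javadoc above describes the client-side handle for the AggregateService coprocessor endpoint. A minimal usage sketch follows, assuming the org.apache.hadoop.hbase.coprocessor.AggregateImplementation endpoint is loaded on the region servers; the table and column names are hypothetical, and cell values are assumed to be 8-byte longs so LongColumnInterpreter applies.)

    // Sketch only (not part of this diff). Requires the AggregateImplementation
    // coprocessor endpoint on the region servers; names below are hypothetical.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
    import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AggregationClientSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (AggregationClient aggClient = new AggregationClient(conf)) {
          // Exactly one column family, as required by the class Javadoc above.
          Scan scan = new Scan();
          scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("v"));
          TableName table = TableName.valueOf("metrics");
          LongColumnInterpreter ci = new LongColumnInterpreter();
          Long max = aggClient.max(table, ci, scan);       // returns the parameter type
          long rows = aggClient.rowCount(table, ci, scan); // returns a long
          double avg = aggClient.avg(table, ci, scan);     // always a double
          System.out.println("max=" + max + " rows=" + rows + " avg=" + avg);
        }
      }
    }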
*/ - public - R max(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R max(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MaxCallBack implements Batch.Callback { R max = null; @@ -199,41 +191,40 @@ public synchronized void update(byte[] region, byte[] row, R result) { } MaxCallBack aMaxCallBack = new MaxCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMax(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMax(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, aMaxCallBack); + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); + } + return null; + } + }, aMaxCallBack); return aMaxCallBack.getMax(); } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public R min( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) + public R + min(final TableName tableName, final ColumnInterpreter ci, final Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return min(table, ci, scan); @@ -241,19 +232,17 @@ public R min( } /** - * It gives the minimum value of a column for a given column family for the - * given range. In case qualifier is null, a min of all values for the given - * family is returned. + * It gives the minimum value of a column for a given column family for the given range. In case + * qualifier is null, a min of all values for the given family is returned. * @param table table to scan. 
* @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return min val <R> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R min(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public R min(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class MinCallBack implements Batch.Callback { private R min = null; @@ -270,67 +259,65 @@ public synchronized void update(byte[] region, byte[] row, R result) { MinCallBack minCallBack = new MinCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public R call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMin(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() > 0) { - ByteString b = response.getFirstPart(0); - Q q = getParsedGenericInstance(ci.getClass(), 3, b); - return ci.getCellValueFromProto(q); - } - return null; + new Batch.Call() { + @Override + public R call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMin(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, minCallBack); + if (response.getFirstPartCount() > 0) { + ByteString b = response.getFirstPart(0); + Q q = getParsedGenericInstance(ci.getClass(), 3, b); + return ci.getCellValueFromProto(q); + } + return null; + } + }, minCallBack); log.debug("Min fom all regions is: " + minCallBack.getMinimum()); return minCallBack.getMinimum(); } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. 
+ * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ public long rowCount( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return rowCount(table, ci, scan); } } /** - * It gives the row count, by summing up the individual results obtained from - * regions. In case the qualifier is null, FirstKeyValueFilter is used to - * optimised the operation. In case qualifier is provided, I can't use the - * filter as it may set the flag to skip to next row, but the value read is - * not of the given filter: in this case, this particular row will not be - * counted ==> an error. + * It gives the row count, by summing up the individual results obtained from regions. In case the + * qualifier is null, FirstKeyValueFilter is used to optimised the operation. In case qualifier is + * provided, I can't use the filter as it may set the flag to skip to next row, but the value read + * is not of the given filter: in this case, this particular row will not be counted ==> an + * error. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - long rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) + public long + rowCount(final Table table, final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, true); class RowNumCallback implements Batch.Callback { @@ -348,57 +335,56 @@ public void update(byte[] region, byte[] row, Long result) { RowNumCallback rowNum = new RowNumCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public Long call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getRowNum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); - ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); - bb.rewind(); - return bb.getLong(); + new Batch.Call() { + @Override + public Long call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getRowNum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, rowNum); + byte[] bytes = getBytesFromResponse(response.getFirstPart(0)); + ByteBuffer bb = ByteBuffer.allocate(8).put(bytes); + bb.rewind(); + return bb.getLong(); + } + }, rowNum); return rowNum.getRowNumCount(); } /** - * It sums up the value returned from various regions. 
In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public S sum( - final TableName tableName, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S + sum(final TableName tableName, final ColumnInterpreter ci, final Scan scan) + throws Throwable { try (Table table = connection.getTable(tableName)) { return sum(table, ci, scan); } } /** - * It sums up the value returned from various regions. In case qualifier is - * null, summation of all the column qualifiers in the given family is done. + * It sums up the value returned from various regions. In case qualifier is null, summation of all + * the column qualifiers in the given family is done. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return sum <S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - S sum(final Table table, final ColumnInterpreter ci, final Scan scan) - throws Throwable { + public S sum(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class SumCallBack implements Batch.Callback { @@ -415,38 +401,38 @@ public synchronized void update(byte[] region, byte[] row, S result) { } SumCallBack sumCallBack = new SumCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call() { - @Override - public S call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - // Not sure what is going on here why I have to do these casts. TODO. - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getSum(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - if (response.getFirstPartCount() == 0) { - return null; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - return s; + new Batch.Call() { + @Override + public S call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + // Not sure what is going on here why I have to do these casts. TODO. 
+ CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getSum(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); } - }, sumCallBack); + if (response.getFirstPartCount() == 0) { + return null; + } + ByteString b = response.getFirstPart(0); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + return s; + } + }, sumCallBack); return sumCallBack.getSumResult(); } /** - * It computes average while fetching sum and row count from all the - * corresponding regions. Approach is to compute a global sum of region level - * sum and rowcount and then compute the average. + * It computes average while fetching sum and row count from all the corresponding regions. + * Approach is to compute a global sum of region level sum and rowcount and then compute the + * average. * @param tableName the name of the table to scan * @param scan the HBase scan object to use to read data from HBase - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ private Pair getAvgArgs( final TableName tableName, final ColumnInterpreter ci, final Scan scan) @@ -457,17 +443,17 @@ private Pair - Pair getAvgArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair + getAvgArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class AvgCallBack implements Batch.Callback> { S sum = null; @@ -486,90 +472,85 @@ public synchronized void update(byte[] region, byte[] row, Pair result) AvgCallBack avgCallBack = new AvgCallBack(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public Pair call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getAvg(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair pair = new Pair<>(null, 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - ByteString b = response.getFirstPart(0); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - pair.setFirst(s); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call>() { + @Override + public Pair call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getAvg(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair pair = new Pair<>(null, 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, avgCallBack); + ByteString b = response.getFirstPart(0); + T t = 
getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + pair.setFirst(s); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, avgCallBack); return avgCallBack.getAvgArgs(); } /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double avg(final TableName tableName, final ColumnInterpreter ci, - Scan scan) throws Throwable { + public double + avg(final TableName tableName, final ColumnInterpreter ci, Scan scan) + throws Throwable { Pair p = getAvgArgs(tableName, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * This is the client side interface/handle for calling the average method for - * a given cf-cq combination. It was necessary to add one more call stack as - * its return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the average and returs the double value. + * This is the client side interface/handle for calling the average method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the average and returs the double value. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ public double avg( - final Table table, final ColumnInterpreter ci, Scan scan) - throws Throwable { + final Table table, final ColumnInterpreter ci, Scan scan) throws Throwable { Pair p = getAvgArgs(table, ci, scan); return ci.divideForAvg(p.getFirst(), p.getSecond()); } /** - * It computes a global standard deviation for a given column and its value. - * Standard deviation is square root of (average of squares - - * average*average). From individual regions, it obtains sum, square sum and - * number of rows. With these, the above values are computed to get the global - * std. 
+ * It computes a global standard deviation for a given column and its value. Standard deviation is + * square root of (average of squares - average*average). From individual regions, it obtains sum, + * square sum and number of rows. With these, the above values are computed to get the global std. * @param table table to scan. * @param scan the HBase scan object to use to read data from HBase * @return standard deviations - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - private - Pair, Long> getStdArgs(final Table table, final ColumnInterpreter ci, - final Scan scan) throws Throwable { + private Pair, Long> + getStdArgs(final Table table, final ColumnInterpreter ci, final Scan scan) + throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); class StdCallback implements Batch.Callback, Long>> { long rowCountVal = 0L; @@ -595,75 +576,72 @@ public synchronized void update(byte[] region, byte[] row, Pair, Long> r StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call, Long>>() { - @Override - public Pair, Long> call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getStd(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } - Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); - if (response.getFirstPartCount() == 0) { - return pair; - } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - pair.setFirst(list); - ByteBuffer bb = ByteBuffer.allocate(8).put( - getBytesFromResponse(response.getSecondPart())); - bb.rewind(); - pair.setSecond(bb.getLong()); + new Batch.Call, Long>>() { + @Override + public Pair, Long> call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getStd(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } + Pair, Long> pair = new Pair<>(new ArrayList<>(), 0L); + if (response.getFirstPartCount() == 0) { return pair; } - }, stdCallback); + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); + } + pair.setFirst(list); + ByteBuffer bb = + ByteBuffer.allocate(8).put(getBytesFromResponse(response.getSecondPart())); + bb.rewind(); + pair.setSecond(bb.getLong()); + return pair; + } + }, stdCallback); return stdCallback.getStdParams(); } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. 
It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - double std(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public double std( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return std(table, ci, scan); } } /** - * This is the client side interface/handle for calling the std method for a - * given cf-cq combination. It was necessary to add one more call stack as its - * return type should be a decimal value, irrespective of what - * columninterpreter says. So, this methods collects the necessary parameters - * to compute the std and returns the double value. + * This is the client side interface/handle for calling the std method for a given cf-cq + * combination. It was necessary to add one more call stack as its return type should be a decimal + * value, irrespective of what columninterpreter says. So, this methods collects the necessary + * parameters to compute the std and returns the double value. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return <R, S> - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public double std( - final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public double + std(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { Pair, Long> p = getStdArgs(table, ci, scan); double res = 0d; double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond()); @@ -674,22 +652,20 @@ public double st } /** - * It helps locate the region with median for a given column whose weight - * is specified in an optional column. - * From individual regions, it obtains sum of values and sum of weights. + * It helps locate the region with median for a given column whose weight is specified in an + * optional column. From individual regions, it obtains sum of values and sum of weights. * @param table table to scan. 
* @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase - * @return pair whose first element is a map between start row of the region - * and (sum of values, sum of weights) for the region, the second element is - * (sum of values, sum of weights) for all the regions chosen - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @return pair whose first element is a map between start row of the region and (sum of values, + * sum of weights) for the region, the second element is (sum of values, sum of weights) + * for all the regions chosen + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ private - Pair>, List> - getMedianArgs(final Table table, - final ColumnInterpreter ci, final Scan scan) throws Throwable { + Pair>, List> getMedianArgs(final Table table, + final ColumnInterpreter ci, final Scan scan) throws Throwable { final AggregateRequest requestArg = validateArgAndGetPB(scan, ci, false); final NavigableMap> map = new TreeMap<>(Bytes.BYTES_COMPARATOR); class StdCallback implements Batch.Callback> { @@ -712,64 +688,63 @@ public synchronized void update(byte[] region, byte[] row, List result) { } StdCallback stdCallback = new StdCallback(); table.coprocessorService(AggregateService.class, scan.getStartRow(), scan.getStopRow(), - new Batch.Call>() { - @Override - public List call(AggregateService instance) throws IOException { - RpcController controller = new AggregationClientRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.getMedian(controller, requestArg, rpcCallback); - AggregateResponse response = rpcCallback.get(); - if (controller.failed()) { - throw new IOException(controller.errorText()); - } + new Batch.Call>() { + @Override + public List call(AggregateService instance) throws IOException { + RpcController controller = new AggregationClientRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.getMedian(controller, requestArg, rpcCallback); + AggregateResponse response = rpcCallback.get(); + if (controller.failed()) { + throw new IOException(controller.errorText()); + } - List list = new ArrayList<>(); - for (int i = 0; i < response.getFirstPartCount(); i++) { - ByteString b = response.getFirstPart(i); - T t = getParsedGenericInstance(ci.getClass(), 4, b); - S s = ci.getPromotedValueFromProto(t); - list.add(s); - } - return list; + List list = new ArrayList<>(); + for (int i = 0; i < response.getFirstPartCount(); i++) { + ByteString b = response.getFirstPart(i); + T t = getParsedGenericInstance(ci.getClass(), 4, b); + S s = ci.getPromotedValueFromProto(t); + list.add(s); } + return list; + } - }, stdCallback); + }, stdCallback); return stdCallback.getMedianParams(); } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. 
* @param tableName the name of the table to scan * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R median(final TableName tableName, ColumnInterpreter ci, - Scan scan) throws Throwable { + public R median( + final TableName tableName, ColumnInterpreter ci, Scan scan) throws Throwable { try (Table table = connection.getTable(tableName)) { return median(table, ci, scan); } } /** - * This is the client side interface/handler for calling the median method for a - * given cf-cq combination. This method collects the necessary parameters - * to compute the median and returns the median. + * This is the client side interface/handler for calling the median method for a given cf-cq + * combination. This method collects the necessary parameters to compute the median and returns + * the median. * @param table table to scan. * @param ci the user's ColumnInterpreter implementation * @param scan the HBase scan object to use to read data from HBase * @return R the median - * @throws Throwable The caller is supposed to handle the exception as they are thrown - * & propagated to it. + * @throws Throwable The caller is supposed to handle the exception as they are thrown & + * propagated to it. */ - public - R median(final Table table, ColumnInterpreter ci, Scan scan) throws Throwable { + public R median(final Table table, + ColumnInterpreter ci, Scan scan) throws Throwable { Pair>, List> p = getMedianArgs(table, ci, scan); byte[] startRow = null; byte[] colFamily = scan.getFamilies()[0]; diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java index 6d804e43d73d..8687fa07d73d 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AggregationHelper.java @@ -19,13 +19,11 @@ import com.google.protobuf.ByteString; import com.google.protobuf.Message; - import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.lang.reflect.ParameterizedType; import java.lang.reflect.Type; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; @@ -39,7 +37,8 @@ */ @InterfaceAudience.Private public final class AggregationHelper { - private AggregationHelper() {} + private AggregationHelper() { + } /** * @param scan the HBase scan object to use to read data from HBase diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java index b3003c4e1411..c23ab81f19f4 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/client/coprocessor/AsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,8 @@ */ @InterfaceAudience.Public public final class AsyncAggregationClient { - private AsyncAggregationClient() {} + private AsyncAggregationClient() { + } private static abstract class AbstractAggregationCallback implements CoprocessorCallback { @@ -82,8 +83,7 @@ public synchronized void onError(Throwable error) { completeExceptionally(error); } - protected abstract void aggregate(RegionInfo region, AggregateResponse resp) - throws IOException; + protected abstract void aggregate(RegionInfo region, AggregateResponse resp) throws IOException; @Override public synchronized void onRegionComplete(RegionInfo region, AggregateResponse resp) { @@ -200,8 +200,8 @@ protected R getFinalResult() { } public static - CompletableFuture rowCount(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + rowCount(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -267,8 +267,8 @@ protected S getFinalResult() { } public static - CompletableFuture avg(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + avg(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -304,8 +304,8 @@ protected Double getFinalResult() { } public static - CompletableFuture std(AsyncTable table, ColumnInterpreter ci, - Scan scan) { + CompletableFuture + std(AsyncTable table, ColumnInterpreter ci, Scan scan) { CompletableFuture future = new CompletableFuture<>(); AggregateRequest req; try { @@ -363,20 +363,20 @@ protected Double getFinalResult() { AbstractAggregationCallback> callback = new AbstractAggregationCallback>(future) { - private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); + private final NavigableMap map = new TreeMap<>(Bytes.BYTES_COMPARATOR); - @Override - protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { - if (resp.getFirstPartCount() > 0) { - map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); + @Override + protected void aggregate(RegionInfo region, AggregateResponse resp) throws IOException { + if (resp.getFirstPartCount() > 0) { + map.put(region.getStartKey(), getPromotedValueFromProto(ci, resp, firstPartIndex)); + } } - } - @Override - protected NavigableMap getFinalResult() { - return map; - } - }; + @Override + protected NavigableMap getFinalResult() { + return map; + } + }; table . 
coprocessorService(AggregateService::newStub, (stub, controller, rpcCallback) -> stub.getMedian(controller, req, rpcCallback), callback) @@ -386,8 +386,8 @@ protected NavigableMap getFinalResult() { } private static void findMedian( - CompletableFuture future, AsyncTable table, - ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { + CompletableFuture future, AsyncTable table, + ColumnInterpreter ci, Scan scan, NavigableMap sumByRegion) { double halfSum = ci.divideForAvg(sumByRegion.values().stream().reduce(ci::add).get(), 2L); S movingSum = null; byte[] startRow = null; @@ -453,9 +453,9 @@ public void onComplete() { }); } - public static - CompletableFuture median(AsyncTable table, - ColumnInterpreter ci, Scan scan) { + public static CompletableFuture + median(AsyncTable table, ColumnInterpreter ci, + Scan scan) { CompletableFuture future = new CompletableFuture<>(); addListener(sumByRegion(table, ci, scan), (sumByRegion, error) -> { if (error != null) { diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java index 5571e1b14cb6..02120ae8702f 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/AggregateImplementation.java @@ -24,7 +24,6 @@ import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.nio.ByteBuffer; @@ -32,7 +31,6 @@ import java.util.Collections; import java.util.List; import java.util.NavigableSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.client.Scan; @@ -48,11 +46,11 @@ import org.slf4j.LoggerFactory; /** - * A concrete AggregateProtocol implementation. Its system level coprocessor - * that computes the aggregate function at a region level. - * {@link ColumnInterpreter} is used to interpret column value. This class is - * parameterized with the following (these are the types with which the {@link ColumnInterpreter} - * is parameterized, and for more description on these, refer to {@link ColumnInterpreter}): + * A concrete AggregateProtocol implementation. Its system level coprocessor that computes the + * aggregate function at a region level. {@link ColumnInterpreter} is used to interpret column + * value. This class is parameterized with the following (these are the types with which the + * {@link ColumnInterpreter} is parameterized, and for more description on these, refer to + * {@link ColumnInterpreter}): * @param Cell value data type * @param Promoted data type * @param
<P>
    PB message that is used to transport initializer specific bytes @@ -61,20 +59,19 @@ */ @InterfaceAudience.Private public class AggregateImplementation - extends AggregateService implements RegionCoprocessor { + extends AggregateService implements RegionCoprocessor { protected static final Logger log = LoggerFactory.getLogger(AggregateImplementation.class); private RegionCoprocessorEnvironment env; /** - * Gives the maximum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, maximum value for the - * entire column family will be returned. + * Gives the maximum for a given combination of column qualifier and column family, in the given + * row range as defined in the Scan object. In its current implementation, it takes one column + * family and one column qualifier (if provided). In case of null column qualifier, maximum value + * for the entire column family will be returned. */ @Override public void getMax(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; T max = null; @@ -112,7 +109,8 @@ public void getMax(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Maximum from this region is " @@ -121,15 +119,14 @@ public void getMax(RpcController controller, AggregateRequest request, } /** - * Gives the minimum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, minimum value for the - * entire column family will be returned. + * Gives the minimum for a given combination of column qualifier and column family, in the given + * row range as defined in the Scan object. In its current implementation, it takes one column + * family and one column qualifier (if provided). In case of null column qualifier, minimum value + * for the entire column family will be returned. */ @Override public void getMin(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; T min = null; @@ -156,8 +153,8 @@ public void getMin(RpcController controller, AggregateRequest request, results.clear(); } while (hasMoreRows); if (min != null) { - response = AggregateResponse.newBuilder().addFirstPart( - ci.getProtoForCellType(min).toByteString()).build(); + response = AggregateResponse.newBuilder() + .addFirstPart(ci.getProtoForCellType(min).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -165,7 +162,8 @@ public void getMin(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Minimum from this region is " @@ -174,15 +172,14 @@ public void getMin(RpcController controller, AggregateRequest request, } /** - * Gives the sum for a given combination of column qualifier and column - * family, in the given row range as defined in the Scan object. 
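Editor's sketch (not in the patch): the region-level max/min/sum endpoints in this file are normally reached through the same blocking client; with a family-only scan the whole column family is aggregated, as the javadocs describe. Table and family names below are hypothetical.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.coprocessor.AggregationClient;
import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter;
import org.apache.hadoop.hbase.util.Bytes;

public class MaxMinSumSketch {
  public static void main(String[] args) throws Throwable {
    // Null qualifier: the endpoints aggregate over the entire column family.
    Scan scan = new Scan().addFamily(Bytes.toBytes("cf"));
    try (AggregationClient client = new AggregationClient(HBaseConfiguration.create())) {
      TableName table = TableName.valueOf("metrics");
      LongColumnInterpreter ci = new LongColumnInterpreter();
      Long max = client.max(table, ci, scan);
      Long min = client.min(table, ci, scan);
      Long sum = client.sum(table, ci, scan);
      System.out.println("max=" + max + " min=" + min + " sum=" + sum);
    }
  }
}
```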
In its - * current implementation, it takes one column family and one column qualifier - * (if provided). In case of null column qualifier, sum for the entire column - * family will be returned. + * Gives the sum for a given combination of column qualifier and column family, in the given row + * range as defined in the Scan object. In its current implementation, it takes one column family + * and one column qualifier (if provided). In case of null column qualifier, sum for the entire + * column family will be returned. */ @Override public void getSum(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; long sum = 0L; @@ -212,8 +209,8 @@ public void getSum(RpcController controller, AggregateRequest request, results.clear(); } while (hasMoreRows); if (sumVal != null) { - response = AggregateResponse.newBuilder().addFirstPart( - ci.getProtoForPromotedType(sumVal).toByteString()).build(); + response = AggregateResponse.newBuilder() + .addFirstPart(ci.getProtoForPromotedType(sumVal).toByteString()).build(); } } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -221,21 +218,22 @@ public void getSum(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } - log.debug("Sum from this region is " - + env.getRegion().getRegionInfo().getRegionNameAsString() + ": " + sum); + log.debug("Sum from this region is " + env.getRegion().getRegionInfo().getRegionNameAsString() + + ": " + sum); done.run(response); } /** - * Gives the row count for the given column family and column qualifier, in - * the given row range as defined in the Scan object. + * Gives the row count for the given column family and column qualifier, in the given row range as + * defined in the Scan object. */ @Override public void getRowNum(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; long counter = 0L; List results = new ArrayList<>(); @@ -244,8 +242,8 @@ public void getRowNum(RpcController controller, AggregateRequest request, Scan scan = ProtobufUtil.toScan(request.getScan()); byte[][] colFamilies = scan.getFamilies(); byte[] colFamily = colFamilies != null ? colFamilies[0] : null; - NavigableSet qualifiers = colFamilies != null ? - scan.getFamilyMap().get(colFamily) : null; + NavigableSet qualifiers = + colFamilies != null ? 
scan.getFamilyMap().get(colFamily) : null; byte[] qualifier = null; if (qualifiers != null && !qualifiers.isEmpty()) { qualifier = qualifiers.pollFirst(); @@ -264,15 +262,15 @@ public void getRowNum(RpcController controller, AggregateRequest request, } while (hasMoreRows); ByteBuffer bb = ByteBuffer.allocate(8).putLong(counter); bb.rewind(); - response = AggregateResponse.newBuilder().addFirstPart( - ByteString.copyFrom(bb)).build(); + response = AggregateResponse.newBuilder().addFirstPart(ByteString.copyFrom(bb)).build(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); } finally { if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } log.info("Row counter from this region is " @@ -281,21 +279,18 @@ public void getRowNum(RpcController controller, AggregateRequest request, } /** - * Gives a Pair with first object as Sum and second object as row count, - * computed for a given combination of column qualifier and column family in - * the given row range as defined in the Scan object. In its current - * implementation, it takes one column family and one column qualifier (if - * provided). In case of null column qualifier, an aggregate sum over all the - * entire column family will be returned. + * Gives a Pair with first object as Sum and second object as row count, computed for a given + * combination of column qualifier and column family in the given row range as defined in the Scan + * object. In its current implementation, it takes one column family and one column qualifier (if + * provided). In case of null column qualifier, an aggregate sum over all the entire column family + * will be returned. *
<p>
    - * The average is computed in - * AggregationClient#avg(byte[], ColumnInterpreter, Scan) by - * processing results from all regions, so its "ok" to pass sum and a Long - * type. + * The average is computed in AggregationClient#avg(byte[], ColumnInterpreter, Scan) by processing + * results from all regions, so its "ok" to pass sum and a Long type. */ @Override public void getAvg(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -318,8 +313,8 @@ public void getAvg(RpcController controller, AggregateRequest request, hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { - sumVal = ci.add(sumVal, ci.castToReturnType(ci.getValue(colFamily, - qualifier, results.get(i)))); + sumVal = ci.add(sumVal, + ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i)))); } rowCountVal++; } while (hasMoreRows); @@ -338,24 +333,24 @@ public void getAvg(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); } /** - * Gives a Pair with first object a List containing Sum and sum of squares, - * and the second object as row count. It is computed for a given combination of - * column qualifier and column family in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * one column qualifier (if provided). The idea is get the value of variance first: - * the average of the squares less the square of the average a standard - * deviation is square root of variance. + * Gives a Pair with first object a List containing Sum and sum of squares, and the second object + * as row count. It is computed for a given combination of column qualifier and column family in + * the given row range as defined in the Scan object. In its current implementation, it takes one + * column family and one column qualifier (if provided). The idea is get the value of variance + * first: the average of the squares less the square of the average a standard deviation is square + * root of variance. */ @Override public void getStd(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { InternalScanner scanner = null; AggregateResponse response = null; try { @@ -379,8 +374,8 @@ public void getStd(RpcController controller, AggregateRequest request, hasMoreRows = scanner.next(results); int listSize = results.size(); for (int i = 0; i < listSize; i++) { - tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, - qualifier, results.get(i)))); + tempVal = ci.add(tempVal, + ci.castToReturnType(ci.getValue(colFamily, qualifier, results.get(i)))); } results.clear(); sumVal = ci.add(sumVal, tempVal); @@ -404,23 +399,22 @@ public void getStd(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); } /** - * Gives a List containing sum of values and sum of weights. - * It is computed for the combination of column - * family and column qualifier(s) in the given row range as defined in the - * Scan object. In its current implementation, it takes one column family and - * two column qualifiers. 
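Editor's sketch (not in the patch): the getStd javadoc above states the formula, variance is the average of the squares minus the square of the average, and the standard deviation is its square root. A minimal numeric illustration of how the client combines the per-region sum, sum-of-squares and row-count parts (values are made up):

```java
public class StdFormulaSketch {
  public static void main(String[] args) {
    // Illustrative partial results, already merged across all regions.
    double sum = 30d;            // sum of values
    double sumOfSquares = 128d;  // sum of squared values
    long rowCount = 8L;

    double avg = sum / rowCount;                    // E[X]
    double avgOfSquares = sumOfSquares / rowCount;  // E[X^2]
    double variance = avgOfSquares - avg * avg;     // E[X^2] - (E[X])^2
    double std = Math.sqrt(variance);               // standard deviation
    System.out.println("std=" + std);
  }
}
```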
The first qualifier is for values column and - * the second qualifier (optional) is for weight column. + * Gives a List containing sum of values and sum of weights. It is computed for the combination of + * column family and column qualifier(s) in the given row range as defined in the Scan object. In + * its current implementation, it takes one column family and two column qualifiers. The first + * qualifier is for values column and the second qualifier (optional) is for weight column. */ @Override public void getMedian(RpcController controller, AggregateRequest request, - RpcCallback done) { + RpcCallback done) { AggregateResponse response = null; InternalScanner scanner = null; try { @@ -447,11 +441,10 @@ public void getMedian(RpcController controller, AggregateRequest request, int listSize = results.size(); for (int i = 0; i < listSize; i++) { Cell kv = results.get(i); - tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, - valQualifier, kv))); + tempVal = ci.add(tempVal, ci.castToReturnType(ci.getValue(colFamily, valQualifier, kv))); if (weightQualifier != null) { tempWeight = ci.add(tempWeight, - ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv))); + ci.castToReturnType(ci.getValue(colFamily, weightQualifier, kv))); } } results.clear(); @@ -471,7 +464,8 @@ public void getMedian(RpcController controller, AggregateRequest request, if (scanner != null) { try { scanner.close(); - } catch (IOException ignored) {} + } catch (IOException ignored) { + } } } done.run(response); @@ -479,11 +473,11 @@ public void getMedian(RpcController controller, AggregateRequest request, @SuppressWarnings("unchecked") // Used server-side too by Aggregation Coprocesor Endpoint. Undo this interdependence. TODO. - ColumnInterpreter constructColumnInterpreterFromRequest( - AggregateRequest request) throws IOException { + ColumnInterpreter constructColumnInterpreterFromRequest(AggregateRequest request) + throws IOException { String className = request.getInterpreterClassName(); try { - ColumnInterpreter ci; + ColumnInterpreter ci; Class cls = Class.forName(className); ci = (ColumnInterpreter) cls.getDeclaredConstructor().newInstance(); @@ -493,8 +487,8 @@ ColumnInterpreter constructColumnInterpreterFromRequest( ci.initialize(initMsg); } return ci; - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException(e); } } @@ -507,17 +501,17 @@ public Iterable getServices() { /** * Stores a reference to the coprocessor environment provided by the * {@link org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost} from the region where this - * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded - * on a table region, so always expects this to be an instance of + * coprocessor is loaded. Since this is a coprocessor endpoint, it always expects to be loaded on + * a table region, so always expects this to be an instance of * {@link RegionCoprocessorEnvironment}. 
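Editor's sketch (not in the patch): because start() above only accepts a RegionCoprocessorEnvironment, AggregateImplementation has to be deployed on table regions. One cluster-wide way to do that is through configuration, the same mechanism the tests later in this patch use for their endpoints; per-table loading via the table descriptor is an alternative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.AggregateImplementation;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;

public class LoadAggregateEndpointSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Registers the endpoint on every region of every table served with this configuration.
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      AggregateImplementation.class.getName());
    System.out.println(conf.get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY));
  }
}
```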
* @param env the environment provided by the coprocessor host * @throws IOException if the provided environment is not an instance of - * {@code RegionCoprocessorEnvironment} + * {@code RegionCoprocessorEnvironment} */ @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; } else { throw new CoprocessorException("Must be loaded on a table region!"); } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java index 38f5f434eb56..dd1ba1bb7fa1 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/coprocessor/Export.java @@ -7,21 +7,19 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.coprocessor; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.Closeable; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -31,7 +29,6 @@ import java.util.List; import java.util.Map; import java.util.TreeMap; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -81,10 +78,8 @@ import org.slf4j.LoggerFactory; /** - * Export an HBase table. Writes content to sequence files up in HDFS. Use - * {@link Import} to read it back in again. It is implemented by the endpoint - * technique. - * + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it + * back in again. It is implemented by the endpoint technique. 
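Editor's sketch (not in the patch): a hedged example of the endpoint-based export described above. It assumes Export is loaded as a region coprocessor on the table, and that the per-region value returned by run() is the Export.Response holder shown later in this file (getRowCount()/getCellCount()); the table name and output path are hypothetical.

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.Export;

public class ExportSketch {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    // Each region writes its own sequence file under the output directory.
    Map<byte[], Export.Response> perRegion = Export.run(conf, TableName.valueOf("metrics"),
      new Scan(), new Path("/tmp/export-metrics"));
    perRegion.values().forEach(r -> System.out.println(r)); // rowCount=..., cellCount=...
  }
}
```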
* @see org.apache.hadoop.hbase.mapreduce.Export */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -114,27 +109,25 @@ static Map run(final Configuration conf, final String[] args) return run(conf, arguments.getFirst(), arguments.getSecond(), arguments.getThird()); } - public static Map run(final Configuration conf, TableName tableName, - Scan scan, Path dir) throws Throwable { + public static Map run(final Configuration conf, TableName tableName, Scan scan, + Path dir) throws Throwable { FileSystem fs = dir.getFileSystem(conf); UserProvider userProvider = UserProvider.instantiate(conf); checkDir(fs, dir); FsDelegationToken fsDelegationToken = new FsDelegationToken(userProvider, "renewer"); fsDelegationToken.acquireDelegationToken(fs); try { - final ExportProtos.ExportRequest request = getConfiguredRequest(conf, dir, - scan, fsDelegationToken.getUserToken()); + final ExportProtos.ExportRequest request = + getConfiguredRequest(conf, dir, scan, fsDelegationToken.getUserToken()); try (Connection con = ConnectionFactory.createConnection(conf); - Table table = con.getTable(tableName)) { + Table table = con.getTable(tableName)) { Map result = new TreeMap<>(Bytes.BYTES_COMPARATOR); - table.coprocessorService(ExportProtos.ExportService.class, - scan.getStartRow(), - scan.getStopRow(), - (ExportProtos.ExportService service) -> { + table.coprocessorService(ExportProtos.ExportService.class, scan.getStartRow(), + scan.getStopRow(), (ExportProtos.ExportService service) -> { ServerRpcController controller = new ServerRpcController(); Map rval = new TreeMap<>(Bytes.BYTES_COMPARATOR); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); service.export(controller, request, rpcCallback); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -159,8 +152,8 @@ private static boolean getCompression(final ExportProtos.ExportRequest request) } } - private static SequenceFile.CompressionType getCompressionType( - final ExportProtos.ExportRequest request) { + private static SequenceFile.CompressionType + getCompressionType(final ExportProtos.ExportRequest request) { if (request.hasCompressType()) { return SequenceFile.CompressionType.valueOf(request.getCompressType()); } else { @@ -173,20 +166,20 @@ private static CompressionCodec getCompressionCodec(final Configuration conf, try { Class codecClass; if (request.hasCompressCodec()) { - codecClass = conf.getClassByName(request.getCompressCodec()) - .asSubclass(CompressionCodec.class); + codecClass = + conf.getClassByName(request.getCompressCodec()).asSubclass(CompressionCodec.class); } else { codecClass = DEFAULT_CODEC; } return ReflectionUtils.newInstance(codecClass, conf); } catch (ClassNotFoundException e) { - throw new IllegalArgumentException("Compression codec " - + request.getCompressCodec() + " was not found.", e); + throw new IllegalArgumentException( + "Compression codec " + request.getCompressCodec() + " was not found.", e); } } private static SequenceFile.Writer.Option getOutputPath(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { Path file = new Path(request.getOutputPath(), "export-" + info.getEncodedName()); FileSystem fs = file.getFileSystem(conf); if (fs.exists(file)) { @@ -196,14 +189,14 @@ private 
static SequenceFile.Writer.Option getOutputPath(final Configuration conf } private static List getWriterOptions(final Configuration conf, - final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { + final RegionInfo info, final ExportProtos.ExportRequest request) throws IOException { List rval = new LinkedList<>(); rval.add(SequenceFile.Writer.keyClass(ImmutableBytesWritable.class)); rval.add(SequenceFile.Writer.valueClass(Result.class)); rval.add(getOutputPath(conf, info, request)); if (getCompression(request)) { rval.add(SequenceFile.Writer.compression(getCompressionType(request), - getCompressionCodec(conf, request))); + getCompressionCodec(conf, request))); } else { rval.add(SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE)); } @@ -216,7 +209,7 @@ private static ExportProtos.ExportResponse processData(final Region region, ScanCoprocessor cp = new ScanCoprocessor(region); RegionScanner scanner = null; try (RegionOp regionOp = new RegionOp(region); - SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { + SecureWriter out = new SecureWriter(conf, userProvider, userToken, opts)) { scanner = cp.checkScannerOpen(scan); ImmutableBytesWritable key = new ImmutableBytesWritable(); long rowCount = 0; @@ -236,8 +229,8 @@ private static ExportProtos.ExportResponse processData(final Region region, Cell firstCell = cells.get(0); for (Cell cell : cells) { if (Bytes.compareTo(firstCell.getRowArray(), firstCell.getRowOffset(), - firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), - cell.getRowLength()) != 0) { + firstCell.getRowLength(), cell.getRowArray(), cell.getRowOffset(), + cell.getRowLength()) != 0) { throw new IOException("Why the RegionScanner#nextRaw returns the data of different" + " rows?? 
first row=" + Bytes.toHex(firstCell.getRowArray(), firstCell.getRowOffset(), @@ -258,10 +251,8 @@ private static ExportProtos.ExportResponse processData(final Region region, } results.clear(); } while (hasMore); - return ExportProtos.ExportResponse.newBuilder() - .setRowCount(rowCount) - .setCellCount(cellCount) - .build(); + return ExportProtos.ExportResponse.newBuilder().setRowCount(rowCount).setCellCount(cellCount) + .build(); } finally { cp.checkScannerClose(scanner); } @@ -276,31 +267,24 @@ private static void checkDir(final FileSystem fs, final Path dir) throws IOExcep } } - private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, - Path dir, final Scan scan, final Token userToken) throws IOException { + private static ExportProtos.ExportRequest getConfiguredRequest(Configuration conf, Path dir, + final Scan scan, final Token userToken) throws IOException { boolean compressed = conf.getBoolean(FileOutputFormat.COMPRESS, false); - String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, - DEFAULT_TYPE.toString()); - String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, - DEFAULT_CODEC.getName()); + String compressionType = conf.get(FileOutputFormat.COMPRESS_TYPE, DEFAULT_TYPE.toString()); + String compressionCodec = conf.get(FileOutputFormat.COMPRESS_CODEC, DEFAULT_CODEC.getName()); DelegationToken protoToken = null; if (userToken != null) { - protoToken = DelegationToken.newBuilder() - .setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) + protoToken = + DelegationToken.newBuilder().setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) .setPassword(ByteStringer.wrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + .setKind(userToken.getKind().toString()).setService(userToken.getService().toString()) + .build(); } - LOG.info("compressed=" + compressed - + ", compression type=" + compressionType - + ", compression codec=" + compressionCodec - + ", userToken=" + userToken); + LOG.info("compressed=" + compressed + ", compression type=" + compressionType + + ", compression codec=" + compressionCodec + ", userToken=" + userToken); ExportProtos.ExportRequest.Builder builder = ExportProtos.ExportRequest.newBuilder() - .setScan(ProtobufUtil.toScan(scan)) - .setOutputPath(dir.toString()) - .setCompressed(compressed) - .setCompressCodec(compressionCodec) - .setCompressType(compressionType); + .setScan(ProtobufUtil.toScan(scan)).setOutputPath(dir.toString()).setCompressed(compressed) + .setCompressCodec(compressionCodec).setCompressType(compressionType); if (protoToken != null) { builder.setFsToken(protoToken); } @@ -328,11 +312,11 @@ public Iterable getServices() { @Override public void export(RpcController controller, ExportProtos.ExportRequest request, - RpcCallback done) { + RpcCallback done) { Region region = env.getRegion(); Configuration conf = HBaseConfiguration.create(env.getConfiguration()); conf.setStrings("io.serializations", conf.get("io.serializations"), - ResultSerialization.class.getName()); + ResultSerialization.class.getName()); try { Scan scan = validateKey(region.getRegionInfo(), request); Token userToken = null; @@ -340,12 +324,11 @@ public void export(RpcController controller, ExportProtos.ExportRequest request, LOG.warn("Hadoop security is enable, but no found of user token"); } else if (userProvider.isHadoopSecurityEnabled()) { userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), - 
request.getFsToken().getPassword().toByteArray(), - new Text(request.getFsToken().getKind()), - new Text(request.getFsToken().getService())); + request.getFsToken().getPassword().toByteArray(), + new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService())); } - ExportProtos.ExportResponse response = processData(region, conf, userProvider, - scan, userToken, getWriterOptions(conf, region.getRegionInfo(), request)); + ExportProtos.ExportResponse response = processData(region, conf, userProvider, scan, + userToken, getWriterOptions(conf, region.getRegionInfo(), request)); done.run(response); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -358,14 +341,12 @@ private Scan validateKey(final RegionInfo region, final ExportProtos.ExportReque Scan scan = ProtobufUtil.toScan(request.getScan()); byte[] regionStartKey = region.getStartKey(); byte[] originStartKey = scan.getStartRow(); - if (originStartKey == null - || Bytes.compareTo(originStartKey, regionStartKey) < 0) { + if (originStartKey == null || Bytes.compareTo(originStartKey, regionStartKey) < 0) { scan.setStartRow(regionStartKey); } byte[] regionEndKey = region.getEndKey(); byte[] originEndKey = scan.getStopRow(); - if (originEndKey == null - || Bytes.compareTo(originEndKey, regionEndKey) > 0) { + if (originEndKey == null || Bytes.compareTo(originEndKey, regionEndKey) > 0) { scan.setStartRow(regionEndKey); } return scan; @@ -423,8 +404,8 @@ void checkScannerClose(final InternalScanner s) throws IOException { } } - boolean preScannerNext(final InternalScanner s, - final List results, final int limit) throws IOException { + boolean preScannerNext(final InternalScanner s, final List results, final int limit) + throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -433,9 +414,8 @@ boolean preScannerNext(final InternalScanner s, } } - boolean postScannerNext(final InternalScanner s, - final List results, final int limit, boolean hasMore) - throws IOException { + boolean postScannerNext(final InternalScanner s, final List results, final int limit, + boolean hasMore) throws IOException { if (region.getCoprocessorHost() == null) { return false; } else { @@ -447,15 +427,13 @@ boolean postScannerNext(final InternalScanner s, private static class SecureWriter implements Closeable { private final PrivilegedWriter privilegedWriter; - SecureWriter(final Configuration conf, final UserProvider userProvider, - final Token userToken, final List opts) - throws IOException { + SecureWriter(final Configuration conf, final UserProvider userProvider, final Token userToken, + final List opts) throws IOException { User user = getActiveUser(userProvider, userToken); try { SequenceFile.Writer sequenceFileWriter = - user.runAs((PrivilegedExceptionAction) () -> - SequenceFile.createWriter(conf, - opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); + user.runAs((PrivilegedExceptionAction) () -> SequenceFile + .createWriter(conf, opts.toArray(new SequenceFile.Writer.Option[opts.size()]))); privilegedWriter = new PrivilegedWriter(user, sequenceFileWriter); } catch (InterruptedException e) { throw new IOException(e); @@ -483,8 +461,7 @@ public void close() throws IOException { } } - private static class PrivilegedWriter implements PrivilegedExceptionAction, - Closeable { + private static class PrivilegedWriter implements PrivilegedExceptionAction, Closeable { private final User user; private final SequenceFile.Writer out; private Object key; @@ -541,11 +518,8 @@ 
public long getCellCount() { @Override public String toString() { StringBuilder builder = new StringBuilder(35); - return builder.append("rowCount=") - .append(rowCount) - .append(", cellCount=") - .append(cellCount) - .toString(); + return builder.append("rowCount=").append(rowCount).append(", cellCount=").append(cellCount) + .toString(); } } } diff --git a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java index fb161d94661c..65b9b8dd52eb 100644 --- a/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java +++ b/hbase-endpoint/src/main/java/org/apache/hadoop/hbase/security/access/SecureBulkLoadEndpoint.java @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.security.access; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; import java.util.List; import java.util.Map; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor; @@ -48,7 +45,6 @@ import org.apache.hadoop.hbase.regionserver.HRegion; import org.apache.hadoop.hbase.regionserver.RegionServerServices; import org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,8 +66,8 @@ public class SecureBulkLoadEndpoint extends SecureBulkLoadService implements Reg @Override public void start(CoprocessorEnvironment env) { - this.env = (RegionCoprocessorEnvironment)env; - rsServices = ((HasRegionServerServices)this.env).getRegionServerServices(); + this.env = (RegionCoprocessorEnvironment) env; + rsServices = ((HasRegionServerServices) this.env).getRegionServerServices(); LOG.warn("SecureBulkLoadEndpoint is deprecated. It will be removed in future releases."); LOG.warn("Secure bulk load has been integrated into HBase core."); } @@ -82,12 +78,12 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void prepareBulkLoad(RpcController controller, PrepareBulkLoadRequest request, - RpcCallback done) { + RpcCallback done) { try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); - String bulkToken = secureBulkLoadManager.prepareBulkLoad((HRegion) this.env.getRegion(), - convert(request)); + String bulkToken = + secureBulkLoadManager.prepareBulkLoad((HRegion) this.env.getRegion(), convert(request)); done.run(PrepareBulkLoadResponse.newBuilder().setBulkToken(bulkToken).build()); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -96,23 +92,22 @@ public void prepareBulkLoad(RpcController controller, PrepareBulkLoadRequest req } /** - * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. + * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. 
*/ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest - convert(PrepareBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { - byte [] bytes = request.toByteArray(); - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder - builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest. - newBuilder(); + convert(PrepareBulkLoadRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { + byte[] bytes = request.toByteArray(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.PrepareBulkLoadRequest + .newBuilder(); builder.mergeFrom(bytes); return builder.build(); } @Override public void cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest request, - RpcCallback done) { + RpcCallback done) { try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); secureBulkLoadManager.cleanupBulkLoad((HRegion) this.env.getRegion(), convert(request)); @@ -124,30 +119,29 @@ public void cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest req } /** - * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. + * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. */ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest - convert(CleanupBulkLoadRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { - byte [] bytes = request.toByteArray(); - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder - builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest. - newBuilder(); + convert(CleanupBulkLoadRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { + byte[] bytes = request.toByteArray(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadRequest + .newBuilder(); builder.mergeFrom(bytes); return builder.build(); } @Override public void secureBulkLoadHFiles(RpcController controller, SecureBulkLoadHFilesRequest request, - RpcCallback done) { + RpcCallback done) { boolean loaded = false; Map> map = null; try { SecureBulkLoadManager secureBulkLoadManager = this.rsServices.getSecureBulkLoadManager(); BulkLoadHFileRequest bulkLoadHFileRequest = ConvertSecureBulkLoadHFilesRequest(request); map = secureBulkLoadManager.secureBulkLoadHFiles((HRegion) this.env.getRegion(), - convert(bulkLoadHFileRequest)); + convert(bulkLoadHFileRequest)); loaded = map != null && !map.isEmpty(); } catch (IOException e) { CoprocessorRpcUtils.setControllerException(controller, e); @@ -156,26 +150,24 @@ public void secureBulkLoadHFiles(RpcController controller, SecureBulkLoadHFilesR } /** - * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. + * Convert from CPEP protobuf 2.5 to internal protobuf 3.3. 
*/ org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest - convert(BulkLoadHFileRequest request) - throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { - byte [] bytes = request.toByteArray(); - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder - builder = - org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest. - newBuilder(); + convert(BulkLoadHFileRequest request) + throws org.apache.hbase.thirdparty.com.google.protobuf.InvalidProtocolBufferException { + byte[] bytes = request.toByteArray(); + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.Builder builder = + org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest + .newBuilder(); builder.mergeFrom(bytes); return builder.build(); } - private BulkLoadHFileRequest ConvertSecureBulkLoadHFilesRequest( - SecureBulkLoadHFilesRequest request) { + private BulkLoadHFileRequest + ConvertSecureBulkLoadHFilesRequest(SecureBulkLoadHFilesRequest request) { BulkLoadHFileRequest.Builder bulkLoadHFileRequest = BulkLoadHFileRequest.newBuilder(); - RegionSpecifier region = - ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, this.env - .getRegionInfo().getRegionName()); + RegionSpecifier region = ProtobufUtil.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, + this.env.getRegionInfo().getRegionName()); bulkLoadHFileRequest.setRegion(region).setFsToken(request.getFsToken()) .setBulkToken(request.getBulkToken()).setAssignSeqNum(request.getAssignSeqNum()) .addAllFamilyPath(request.getFamilyPathList()); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java index d50ceb9c1c39..52c3fa6bacfa 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestAsyncAggregationClient.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java index 30fce3c78709..7ea9a165adbd 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/client/TestRpcControllerFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -52,7 +52,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.com.google.common.collect.Multiset; -@Category({MediumTests.class, ClientTests.class}) +@Category({ MediumTests.class, ClientTests.class }) public class TestRpcControllerFactory { @ClassRule diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java index 7b315f9f367b..92c606d1d78e 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpoint.java @@ -20,12 +20,10 @@ import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -43,7 +41,7 @@ * The aggregation implementation at a region. */ public class ColumnAggregationEndpoint extends ColumnAggregationService - implements RegionCoprocessor { + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpoint.class); private RegionCoprocessorEnvironment env = null; @@ -55,7 +53,7 @@ public Iterable getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -71,8 +69,8 @@ public void sum(RpcController controller, SumRequest request, RpcCallback getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -74,7 +72,7 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void sum(RpcController controller, ColumnAggregationNullResponseSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. 
@@ -122,9 +120,8 @@ public void sum(RpcController controller, ColumnAggregationNullResponseSumReques } } } - done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult) - .build()); - LOG.info("Returning sum " + sumResult + " for region " + - Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); + done.run(ColumnAggregationNullResponseSumResponse.newBuilder().setSum(sumResult).build()); + LOG.info("Returning sum " + sumResult + " for region " + + Bytes.toStringBinary(env.getRegion().getRegionInfo().getRegionName())); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java index 4aaaea268e3d..f1a51a97fa33 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ColumnAggregationEndpointWithErrors.java @@ -20,12 +20,10 @@ import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.CoprocessorEnvironment; @@ -43,13 +41,13 @@ import org.slf4j.LoggerFactory; /** - * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on - * the last region in the table. This allows tests to ensure correct error handling of - * coprocessor endpoints throwing exceptions. + * Test coprocessor endpoint that always throws a {@link DoNotRetryIOException} for requests on the + * last region in the table. This allows tests to ensure correct error handling of coprocessor + * endpoints throwing exceptions. */ public class ColumnAggregationEndpointWithErrors - extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors - implements RegionCoprocessor { + extends ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors + implements RegionCoprocessor { private static final Logger LOG = LoggerFactory.getLogger(ColumnAggregationEndpointWithErrors.class); @@ -63,7 +61,7 @@ public Iterable getServices() { @Override public void start(CoprocessorEnvironment env) throws IOException { if (env instanceof RegionCoprocessorEnvironment) { - this.env = (RegionCoprocessorEnvironment)env; + this.env = (RegionCoprocessorEnvironment) env; return; } throw new CoprocessorException("Must be loaded on a table region!"); @@ -76,7 +74,7 @@ public void stop(CoprocessorEnvironment env) throws IOException { @Override public void sum(RpcController controller, ColumnAggregationWithErrorsSumRequest request, - RpcCallback done) { + RpcCallback done) { // aggregate at each region Scan scan = new Scan(); // Family is required in pb. Qualifier is not. 
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java index 63af9ca7c488..ab24e0f032dd 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/ProtobufCoprocessorService.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -20,10 +20,8 @@ import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; import com.google.protobuf.Service; - import java.io.IOException; import java.util.Collections; - import org.apache.hadoop.hbase.CoprocessorEnvironment; import org.apache.hadoop.hbase.ipc.CoprocessorRpcUtils; import org.apache.hadoop.hbase.ipc.RpcServer; @@ -41,8 +39,9 @@ * service methods. For internal use by unit tests only. */ public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto - implements MasterCoprocessor, RegionCoprocessor { - public ProtobufCoprocessorService() {} + implements MasterCoprocessor, RegionCoprocessor { + public ProtobufCoprocessorService() { + } @Override public Iterable getServices() { @@ -51,34 +50,34 @@ public Iterable getServices() { @Override public void ping(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(TestProtos.EmptyResponseProto.getDefaultInstance()); } @Override public void echo(RpcController controller, TestProtos.EchoRequestProto request, - RpcCallback done) { + RpcCallback done) { String message = request.getMessage(); done.run(TestProtos.EchoResponseProto.newBuilder().setMessage(message).build()); } @Override public void error(RpcController controller, TestProtos.EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, new IOException("Test exception")); done.run(null); } @Override public void pause(RpcController controller, PauseRequestProto request, - RpcCallback done) { + RpcCallback done) { Threads.sleepWithoutInterrupt(request.getMs()); done.run(EmptyResponseProto.getDefaultInstance()); } @Override public void addr(RpcController controller, EmptyRequestProto request, - RpcCallback done) { + RpcCallback done) { done.run(AddrResponseProto.newBuilder() .setAddr(RpcServer.getRemoteAddress().get().getHostAddress()).build()); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java index 1ea3b1ed53c8..41b019cdb0f1 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestAsyncCoprocessorEndpoint.java @@ -78,10 +78,9 @@ public void testMasterCoprocessorService() throws Exception { TestProtos.EchoRequestProto request = TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); TestProtos.EchoResponseProto response = - admin - . 
- coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.echo(c, request, done)).get(); + admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.echo(c, request, done)).get(); assertEquals("hello", response.getMessage()); } @@ -89,10 +88,9 @@ public void testMasterCoprocessorService() throws Exception { public void testMasterCoprocessorError() throws Exception { TestProtos.EmptyRequestProto emptyRequest = TestProtos.EmptyRequestProto.getDefaultInstance(); try { - admin - . - coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto::newStub, - (s, c, done) -> s.error(c, emptyRequest, done)).get(); + admin. coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto::newStub, + (s, c, done) -> s.error(c, emptyRequest, done)).get(); fail("Should have thrown an exception"); } catch (Exception e) { } @@ -104,11 +102,9 @@ public void testRegionServerCoprocessorService() throws Exception { DummyRegionServerEndpointProtos.DummyRequest request = DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); DummyRegionServerEndpointProtos.DummyResponse response = - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); + admin. coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyCall(c, request, done), serverName).get(); assertEquals(DUMMY_VALUE, response.getValue()); } @@ -118,11 +114,9 @@ public void testRegionServerCoprocessorServiceError() throws Exception { DummyRegionServerEndpointProtos.DummyRequest request = DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(); try { - admin - . coprocessorService( - DummyRegionServerEndpointProtos.DummyService::newStub, - (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); + admin. coprocessorService( + DummyRegionServerEndpointProtos.DummyService::newStub, + (s, c, done) -> s.dummyThrow(c, request, done), serverName).get(); fail("Should have thrown an exception"); } catch (Exception e) { assertTrue(e.getCause() instanceof RetriesExhaustedException); @@ -131,8 +125,9 @@ DummyRegionServerEndpointProtos.DummyResponse> coprocessorService( } public static class DummyRegionServerEndpoint extends DummyService - implements RegionServerCoprocessor { - public DummyRegionServerEndpoint() {} + implements RegionServerCoprocessor { + public DummyRegionServerEndpoint() { + } @Override public Iterable getServices() { @@ -154,8 +149,7 @@ public void dummyCall(RpcController controller, DummyRequest request, } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, + public void dummyThrow(RpcController controller, DummyRequest request, RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java index c108db28a2ae..f5cee664f40c 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestBatchCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,7 +57,7 @@ /** * TestEndpoint: test cases to verify the batch execution of coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestBatchCoprocessorEndpoint { @ClassRule @@ -66,8 +66,7 @@ public class TestBatchCoprocessorEndpoint { private static final Logger LOG = LoggerFactory.getLogger(TestBatchCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestTable"); + private static final TableName TEST_TABLE = TableName.valueOf("TestTable"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -84,17 +83,17 @@ public static void setupBeforeClass() throws Exception { // set configure to indicate which cp should be loaded Configuration conf = util.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName(), - ColumnAggregationEndpointWithErrors.class.getName(), - ColumnAggregationEndpointNullResponse.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName(), + ColumnAggregationEndpointWithErrors.class.getName(), + ColumnAggregationEndpointNullResponse.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); HTableDescriptor desc = new HTableDescriptor(TEST_TABLE); desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); - admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); util.waitUntilAllRegionsAssigned(TEST_TABLE); admin.close(); @@ -116,24 +115,21 @@ public static void tearDownAfterClass() throws Exception { public void testAggregationNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); ColumnAggregationNullResponseSumRequest.Builder builder = - ColumnAggregationNullResponseSumRequest - .newBuilder(); + ColumnAggregationNullResponseSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); } - Map results = - table.batchCoprocessorService( - ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationNullResponseSumResponse.getDefaultInstance()); + Map results = table.batchCoprocessorService( + ColumnAggregationServiceNullResponse.getDescriptor().findMethodByName("sum"), builder.build(), + ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationNullResponseSumResponse.getDefaultInstance()); int sumResult = 0; int expectedResult = 0; - for (Map.Entry e : - results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + for (Map.Entry e : results.entrySet()) { + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } 
for (int i = 0; i < rowSeperator2; i++) { @@ -152,29 +148,29 @@ private static byte[][] makeN(byte[] base, int n) { } private Map sum(final Table table, final byte[] family, - final byte[] qualifier, final byte[] start, final byte[] end) throws ServiceException, - Throwable { - ColumnAggregationProtos.SumRequest.Builder builder = ColumnAggregationProtos.SumRequest - .newBuilder(); + final byte[] qualifier, final byte[] start, final byte[] end) + throws ServiceException, Throwable { + ColumnAggregationProtos.SumRequest.Builder builder = + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteString.copyFrom(qualifier)); } return table.batchCoprocessorService( - ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), - builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); + ColumnAggregationProtos.ColumnAggregationService.getDescriptor().findMethodByName("sum"), + builder.build(), start, end, ColumnAggregationProtos.SumResponse.getDefaultInstance()); } @Test public void testAggregationWithReturnValue() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -185,13 +181,12 @@ public void testAggregationWithReturnValue() throws Throwable { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], - ROWS[ROWS.length - 1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -204,13 +199,13 @@ public void testAggregationWithReturnValue() throws Throwable { @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length - 1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < ROWSIZE; i++) { @@ -223,8 +218,8 @@ public void testAggregation() throws Throwable { sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " 
+ e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -237,14 +232,10 @@ public void testAggregation() throws Throwable { @Test public void testAggregationWithErrors() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - final Map results = - Collections.synchronizedMap( - new TreeMap( - Bytes.BYTES_COMPARATOR - )); + final Map results = Collections.synchronizedMap( + new TreeMap(Bytes.BYTES_COMPARATOR)); ColumnAggregationWithErrorsSumRequest.Builder builder = - ColumnAggregationWithErrorsSumRequest - .newBuilder(); + ColumnAggregationWithErrorsSumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(TEST_FAMILY)); if (TEST_QUALIFIER != null && TEST_QUALIFIER.length > 0) { builder.setQualifier(ByteString.copyFrom(TEST_QUALIFIER)); @@ -253,18 +244,18 @@ public void testAggregationWithErrors() throws Throwable { boolean hasError = false; try { table.batchCoprocessorService( - ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() - .findMethodByName("sum"), - builder.build(), ROWS[0], ROWS[ROWS.length - 1], - ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), - new Batch.Callback() { + ColumnAggregationWithErrorsProtos.ColumnAggregationServiceWithErrors.getDescriptor() + .findMethodByName("sum"), + builder.build(), ROWS[0], ROWS[ROWS.length - 1], + ColumnAggregationWithErrorsSumResponse.getDefaultInstance(), + new Batch.Callback() { - @Override - public void update(byte[] region, byte[] row, - ColumnAggregationWithErrorsSumResponse result) { - results.put(region, result); - } - }); + @Override + public void update(byte[] region, byte[] row, + ColumnAggregationWithErrorsSumResponse result) { + results.put(region, result); + } + }); } catch (Throwable t) { LOG.info("Exceptions in coprocessor service", t); hasError = true; @@ -273,8 +264,8 @@ public void update(byte[] region, byte[] row, int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value " + e.getValue().getSum() + " for region " - + Bytes.toStringBinary(e.getKey())); + LOG.info( + "Got value " + e.getValue().getSum() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue().getSum(); } for (int i = 0; i < rowSeperator2; i++) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java index 5c3e53639223..49affaf99b60 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestClassLoading.java @@ -64,7 +64,7 @@ /** * Test coprocessors class loading. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestClassLoading { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -91,17 +91,17 @@ public Optional getMasterObserver() { static final String cpName6 = "TestCP6"; private static Class regionCoprocessor1 = ColumnAggregationEndpoint.class; - // TOOD: Fix the import of this handler. It is coming in from a package that is far away. + // TOOD: Fix the import of this handler. It is coming in from a package that is far away. 
private static Class regionCoprocessor2 = TestServerCustomProtocol.PingHandler.class; private static Class regionServerCoprocessor = SampleRegionWALCoprocessor.class; private static Class masterCoprocessor = TestMasterCoprocessor.class; private static final String[] regionServerSystemCoprocessors = - new String[]{ regionServerCoprocessor.getSimpleName() }; + new String[] { regionServerCoprocessor.getSimpleName() }; - private static final String[] masterRegionServerSystemCoprocessors = new String[] { - regionCoprocessor1.getSimpleName(), MultiRowMutationEndpoint.class.getSimpleName(), - regionServerCoprocessor.getSimpleName() }; + private static final String[] masterRegionServerSystemCoprocessors = + new String[] { regionCoprocessor1.getSimpleName(), + MultiRowMutationEndpoint.class.getSimpleName(), regionServerCoprocessor.getSimpleName() }; @BeforeClass public static void setUpBeforeClass() throws Exception { @@ -109,19 +109,15 @@ public static void setUpBeforeClass() throws Exception { // regionCoprocessor1 will be loaded on all regionservers, since it is // loaded for any tables (user or meta). - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor1.getName()); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, regionCoprocessor1.getName()); // regionCoprocessor2 will be loaded only on regionservers that serve a // user table region. Therefore, if there are no user tables loaded, // this coprocessor will not be loaded on any regionserver. - conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, - regionCoprocessor2.getName()); + conf.setStrings(CoprocessorHost.USER_REGION_COPROCESSOR_CONF_KEY, regionCoprocessor2.getName()); - conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, - regionServerCoprocessor.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - masterCoprocessor.getName()); + conf.setStrings(CoprocessorHost.WAL_COPROCESSOR_CONF_KEY, regionServerCoprocessor.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, masterCoprocessor.getName()); TEST_UTIL.startMiniCluster(1); cluster = TEST_UTIL.getDFSCluster(); } @@ -132,11 +128,9 @@ public static void tearDownAfterClass() throws Exception { } static File buildCoprocessorJar(String className) throws Exception { - String code = - "import org.apache.hadoop.hbase.coprocessor.*;" + - "public class " + className + " implements RegionCoprocessor {}"; - return ClassLoaderTestHelper.buildJar( - TEST_UTIL.getDataTestDir().toString(), className, code); + String code = "import org.apache.hadoop.hbase.coprocessor.*;" + "public class " + className + + " implements RegionCoprocessor {}"; + return ClassLoaderTestHelper.buildJar(TEST_UTIL.getDataTestDir().toString(), className, code); } @Test @@ -150,31 +144,27 @@ public void testClassLoadingFromHDFS() throws Exception { // copy the jars into dfs fs.copyFromLocalFile(new Path(jarFile1.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + - jarFile1.getName(); + String jarFileOnHDFS1 = fs.getUri().toString() + Path.SEPARATOR + jarFile1.getName(); Path pathOnHDFS1 = new Path(jarFileOnHDFS1); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS1)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS1)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS1); fs.copyFromLocalFile(new Path(jarFile2.getPath()), - new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS2 = 
fs.getUri().toString() + Path.SEPARATOR + - jarFile2.getName(); + new Path(fs.getUri().toString() + Path.SEPARATOR)); + String jarFileOnHDFS2 = fs.getUri().toString() + Path.SEPARATOR + jarFile2.getName(); Path pathOnHDFS2 = new Path(jarFileOnHDFS2); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(pathOnHDFS2)); + assertTrue("Copy jar file to HDFS failed.", fs.exists(pathOnHDFS2)); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS2); // create a table that references the coprocessors HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("test")); - // without configuration values - htd.setValue("COPROCESSOR$1", jarFileOnHDFS1.toString() + "|" + cpName1 + - "|" + Coprocessor.PRIORITY_USER); - // with configuration values - htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 + - "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + // without configuration values + htd.setValue("COPROCESSOR$1", + jarFileOnHDFS1.toString() + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); + // with configuration values + htd.setValue("COPROCESSOR$2", jarFileOnHDFS2.toString() + "|" + cpName2 + "|" + + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -183,18 +173,17 @@ public void testClassLoadingFromHDFS() throws Exception { admin.deleteTable(tableName); } CoprocessorClassLoader.clearCache(); - byte[] startKey = {10, 63}; - byte[] endKey = {12, 43}; + byte[] startKey = { 10, 63 }; + byte[] endKey = { 12, 43 }; admin.createTable(htd, startKey, endKey, 4); waitForTable(htd.getTableName()); // verify that the coprocessors were loaded - boolean foundTableRegion=false; + boolean foundTableRegion = false; boolean found1 = true, found2 = true, found2_k1 = true, found2_k2 = true, found2_k3 = true; Map> regionsActiveClassLoaders = new HashMap<>(); MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: - hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { foundTableRegion = true; CoprocessorEnvironment env; @@ -212,8 +201,8 @@ public void testClassLoadingFromHDFS() throws Exception { found2_k2 = false; found2_k3 = false; } - regionsActiveClassLoaders - .put(region, ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); + regionsActiveClassLoaders.put(region, + ((CoprocessorHost) region.getCoprocessorHost()).getExternalClassLoaders()); } } @@ -228,17 +217,16 @@ public void testClassLoadingFromHDFS() throws Exception { CoprocessorClassLoader.getIfCached(pathOnHDFS1)); assertNotNull(jarFileOnHDFS2 + " was not cached", CoprocessorClassLoader.getIfCached(pathOnHDFS2)); - //two external jar used, should be one classloader per jar - assertEquals("The number of cached classloaders should be equal to the number" + - " of external jar files", + // two external jar used, should be one classloader per jar + assertEquals( + "The number of cached classloaders should be equal to the number" + " of external jar files", 2, CoprocessorClassLoader.getAllCached().size()); - //check if region active classloaders are shared across all RS regions - Set externalClassLoaders = new HashSet<>( - CoprocessorClassLoader.getAllCached()); + // check if region active classloaders are shared across all RS regions + Set 
externalClassLoaders = new HashSet<>(CoprocessorClassLoader.getAllCached()); for (Map.Entry> regionCP : regionsActiveClassLoaders.entrySet()) { assertTrue("Some CP classloaders for region " + regionCP.getKey() + " are not cached." - + " ClassLoader Cache:" + externalClassLoaders - + " Region ClassLoaders:" + regionCP.getValue(), + + " ClassLoader Cache:" + externalClassLoaders + " Region ClassLoaders:" + + regionCP.getValue(), externalClassLoaders.containsAll(regionCP.getValue())); } } @@ -255,8 +243,8 @@ public void testClassLoadingFromLocalFS() throws Exception { // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName3)); htd.addFamily(new HColumnDescriptor("test")); - htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName3 + "|" + - Coprocessor.PRIORITY_USER); + htd.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName3 + "|" + Coprocessor.PRIORITY_USER); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(htd); waitForTable(htd.getTableName()); @@ -264,7 +252,7 @@ public void testClassLoadingFromLocalFS() throws Exception { // verify that the coprocessor was loaded boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName3)) { found = (region.getCoprocessorHost().findCoprocessor(cpName3) != null); } @@ -280,8 +268,8 @@ public void testPrivateClassLoader() throws Exception { // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(cpName4)); htd.addFamily(new HColumnDescriptor("test")); - htd.setValue("COPROCESSOR$1", getLocalPath(jarFile) + "|" + cpName4 + "|" + - Coprocessor.PRIORITY_USER); + htd.setValue("COPROCESSOR$1", + getLocalPath(jarFile) + "|" + cpName4 + "|" + Coprocessor.PRIORITY_USER); Admin admin = TEST_UTIL.getAdmin(); admin.createTable(htd); waitForTable(htd.getTableName()); @@ -289,7 +277,7 @@ public void testPrivateClassLoader() throws Exception { // verify that the coprocessor was loaded correctly boolean found = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(cpName4)) { Coprocessor cp = region.getCoprocessorHost().findCoprocessor(cpName4); if (cp != null) { @@ -317,12 +305,10 @@ public void testHBase3810() throws Exception { String cpKey2 = " Coprocessor$2 "; String cpKey3 = " coprocessor$03 "; - String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + - Coprocessor.PRIORITY_USER; + String cpValue1 = getLocalPath(jarFile1) + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER; String cpValue2 = getLocalPath(jarFile2) + " | " + cpName2 + " | "; // load from default class loader - String cpValue3 = - " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; + String cpValue3 = " | org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver | | k=v "; // create a table that references the jar HTableDescriptor htd = new HTableDescriptor(tableName); @@ -334,14 +320,12 @@ public void testHBase3810() throws Exception { htd.setValue(cpKey3, cpValue3); // add 2 coprocessor by using new htd.setCoprocessor() api - htd.addCoprocessor(cpName5, new 
Path(getLocalPath(jarFile5)), - Coprocessor.PRIORITY_USER, null); + htd.addCoprocessor(cpName5, new Path(getLocalPath(jarFile5)), Coprocessor.PRIORITY_USER, null); Map kvs = new HashMap<>(); kvs.put("k1", "v1"); kvs.put("k2", "v2"); kvs.put("k3", "v3"); - htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)), - Coprocessor.PRIORITY_USER, kvs); + htd.addCoprocessor(cpName6, new Path(getLocalPath(jarFile6)), Coprocessor.PRIORITY_USER, kvs); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { @@ -354,23 +338,17 @@ public void testHBase3810() throws Exception { waitForTable(htd.getTableName()); // verify that the coprocessor was loaded - boolean found_2 = false, found_1 = false, found_3 = false, - found_5 = false, found_6 = false; - boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, - found6_k4 = false; + boolean found_2 = false, found_1 = false, found_3 = false, found_5 = false, found_6 = false; + boolean found6_k1 = false, found6_k2 = false, found6_k3 = false, found6_k4 = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { - found_1 = found_1 || - (region.getCoprocessorHost().findCoprocessor(cpName1) != null); - found_2 = found_2 || - (region.getCoprocessorHost().findCoprocessor(cpName2) != null); - found_3 = found_3 || - (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") - != null); - found_5 = found_5 || - (region.getCoprocessorHost().findCoprocessor(cpName5) != null); + found_1 = found_1 || (region.getCoprocessorHost().findCoprocessor(cpName1) != null); + found_2 = found_2 || (region.getCoprocessorHost().findCoprocessor(cpName2) != null); + found_3 = found_3 + || (region.getCoprocessorHost().findCoprocessor("SimpleRegionObserver") != null); + found_5 = found_5 || (region.getCoprocessorHost().findCoprocessor(cpName5) != null); CoprocessorEnvironment env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName6); @@ -413,27 +391,24 @@ void loadingClassFromLibDirInJar(String libPrefix) throws Exception { File innerJarFile2 = buildCoprocessorJar(cpName2); File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar"); - ClassLoaderTestHelper.addJarFilesToJar( - outerJarFile, libPrefix, innerJarFile1, innerJarFile2); + ClassLoaderTestHelper.addJarFilesToJar(outerJarFile, libPrefix, innerJarFile1, innerJarFile2); // copy the jars into dfs fs.copyFromLocalFile(new Path(outerJarFile.getPath()), new Path(fs.getUri().toString() + Path.SEPARATOR)); - String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + - outerJarFile.getName(); - assertTrue("Copy jar file to HDFS failed.", - fs.exists(new Path(jarFileOnHDFS))); + String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR + outerJarFile.getName(); + assertTrue("Copy jar file to HDFS failed.", fs.exists(new Path(jarFileOnHDFS))); LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS); // create a table that references the coprocessors HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor("test")); - // without configuration values - htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 + - "|" + Coprocessor.PRIORITY_USER); - // with configuration values - htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 + - "|" + 
Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); + // without configuration values + htd.setValue("COPROCESSOR$1", + jarFileOnHDFS.toString() + "|" + cpName1 + "|" + Coprocessor.PRIORITY_USER); + // with configuration values + htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 + "|" + + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3"); Admin admin = TEST_UTIL.getAdmin(); if (admin.tableExists(tableName)) { if (admin.isTableEnabled(tableName)) { @@ -445,10 +420,9 @@ void loadingClassFromLibDirInJar(String libPrefix) throws Exception { waitForTable(htd.getTableName()); // verify that the coprocessors were loaded - boolean found1 = false, found2 = false, found2_k1 = false, - found2_k2 = false, found2_k3 = false; + boolean found1 = false, found2 = false, found2_k1 = false, found2_k2 = false, found2_k3 = false; MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster(); - for (HRegion region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { + for (HRegion region : hbase.getRegionServer(0).getOnlineRegionsLocalContext()) { if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) { CoprocessorEnvironment env; env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1); @@ -481,24 +455,21 @@ public void testRegionServerCoprocessorsReported() throws Exception { } /** - * return the subset of all regionservers - * (actually returns set of ServerLoads) - * which host some region in a given table. - * used by assertAllRegionServers() below to - * test reporting of loaded coprocessors. + * return the subset of all regionservers (actually returns set of ServerLoads) which host some + * region in a given table. used by assertAllRegionServers() below to test reporting of loaded + * coprocessors. * @param tableName : given table. * @return subset of all servers. */ Map serversForTable(String tableName) { Map serverLoadHashMap = new HashMap<>(); - for(Map.Entry server: - TEST_UTIL.getMiniHBaseCluster().getMaster().getServerManager(). - getOnlineServers().entrySet()) { - for(Map.Entry region: - server.getValue().getRegionMetrics().entrySet()) { + for (Map.Entry server : TEST_UTIL.getMiniHBaseCluster().getMaster() + .getServerManager().getOnlineServers().entrySet()) { + for (Map.Entry region : server.getValue().getRegionMetrics() + .entrySet()) { if (region.getValue().getNameAsString().equals(tableName)) { // this server hosts a region of tableName: add this server.. - serverLoadHashMap.put(server.getKey(),server.getValue()); + serverLoadHashMap.put(server.getKey(), server.getValue()); // .. and skip the rest of the regions that it hosts. 
break; } @@ -519,13 +490,12 @@ void assertAllRegionServers(String tableName) throws InterruptedException { } for (int i = 0; i < 5; i++) { boolean any_failed = false; - for(Map.Entry server: servers.entrySet()) { + for (Map.Entry server : servers.entrySet()) { String[] actualCoprocessors = - server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); + server.getValue().getCoprocessorNames().stream().toArray(size -> new String[size]); if (!Arrays.equals(actualCoprocessors, expectedCoprocessors)) { - LOG.debug("failed comparison: actual: " + - Arrays.toString(actualCoprocessors) + - " ; expected: " + Arrays.toString(expectedCoprocessors)); + LOG.debug("failed comparison: actual: " + Arrays.toString(actualCoprocessors) + + " ; expected: " + Arrays.toString(expectedCoprocessors)); any_failed = true; expectedCoprocessors = switchExpectedCoprocessors(expectedCoprocessors); break; @@ -556,11 +526,9 @@ public void testMasterCoprocessorsReported() { // HBASE 4070: Improve region server metrics to report loaded coprocessors // to master: verify that the master is reporting the correct set of // loaded coprocessors. - final String loadedMasterCoprocessorsVerify = - "[" + masterCoprocessor.getSimpleName() + "]"; + final String loadedMasterCoprocessorsVerify = "[" + masterCoprocessor.getSimpleName() + "]"; String loadedMasterCoprocessors = - java.util.Arrays.toString( - TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); + java.util.Arrays.toString(TEST_UTIL.getHBaseCluster().getMaster().getMasterCoprocessors()); assertEquals(loadedMasterCoprocessorsVerify, loadedMasterCoprocessors); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java index 4b8f6c7e8bec..30ca537f93cc 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpoint.java @@ -65,7 +65,7 @@ /** * TestEndpoint: test cases to verify coprocessor Endpoint */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -73,8 +73,7 @@ public class TestCoprocessorEndpoint { private static final Logger LOG = LoggerFactory.getLogger(TestCoprocessorEndpoint.class); - private static final TableName TEST_TABLE = - TableName.valueOf("TestCoprocessorEndpoint"); + private static final TableName TEST_TABLE = TableName.valueOf("TestCoprocessorEndpoint"); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); private static final byte[] TEST_QUALIFIER = Bytes.toBytes("TestQualifier"); private static byte[] ROW = Bytes.toBytes("testRow"); @@ -92,15 +91,15 @@ public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), - ProtobufCoprocessorService.class.getName()); + org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(), + ProtobufCoprocessorService.class.getName()); conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); + 
ProtobufCoprocessorService.class.getName()); util.startMiniCluster(2); Admin admin = util.getAdmin(); HTableDescriptor desc = new HTableDescriptor(TEST_TABLE); desc.addFamily(new HColumnDescriptor(TEST_FAMILY)); - admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); util.waitUntilAllRegionsAssigned(TEST_TABLE); Table table = util.getConnection().getTable(TEST_TABLE); @@ -117,19 +116,17 @@ public static void tearDownAfterClass() throws Exception { util.shutdownMiniCluster(); } - private Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) - throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + private Map sum(final Table table, final byte[] family, final byte[] qualifier, + final byte[] start, final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) - throws IOException { + throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = - ColumnAggregationProtos.SumRequest.newBuilder(); + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteStringer.wrap(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteStringer.wrap(qualifier)); @@ -143,12 +140,12 @@ public Long call(ColumnAggregationProtos.ColumnAggregationService instance) @Test public void testAggregation() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[0], ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = 0; i < ROWSIZE; i++) { @@ -159,12 +156,11 @@ public void testAggregation() throws Throwable { results.clear(); // scan: for region 2 and region 3 - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, - ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); sumResult += e.getValue(); } for (int i = rowSeperator1; i < ROWSIZE; i++) { @@ -179,22 +175,22 @@ public void testCoprocessorService() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } final TestProtos.EchoRequestProto request = TestProtos.EchoRequestProto.newBuilder().setMessage("hello").build(); - final Map results = 
Collections.synchronizedMap( - new TreeMap(Bytes.BYTES_COMPARATOR)); + final Map results = + Collections.synchronizedMap(new TreeMap(Bytes.BYTES_COMPARATOR)); try { // scan: for all regions final RpcController controller = new ServerRpcController(); - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); @@ -203,33 +199,31 @@ public TestProtos.EchoResponseProto call( LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation info : regions) { - LOG.info("Region info is "+info.getRegionInfo().getRegionNameAsString()); + LOG.info("Region info is " + info.getRegionInfo().getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionInfo().getRegionName())); } results.clear(); // scan: for region 2 and region 3 - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[rowSeperator1], ROWS[ROWS.length - 1], + table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[rowSeperator1], + ROWS[ROWS.length - 1], new Batch.Call() { @Override - public TestProtos.EchoResponseProto call( - TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { + public TestProtos.EchoResponseProto + call(TestRpcServiceProtos.TestProtobufRpcProto instance) throws IOException { LOG.debug("Default response is " + TestProtos.EchoRequestProto.getDefaultInstance()); CoprocessorRpcUtils.BlockingRpcCallback callback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); @@ -238,18 +232,16 @@ public TestProtos.EchoResponseProto call( LOG.debug("Batch.Call returning result " + response); return response; } - }, - new Batch.Callback() { + }, new Batch.Callback() { @Override public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto result) { assertNotNull(result); assertEquals("hello", result.getMessage()); results.put(region, result.getMessage()); } - } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(2, results.size()); } finally { @@ -261,7 +253,7 @@ public void update(byte[] region, byte[] row, TestProtos.EchoResponseProto resul public void testCoprocessorServiceNullResponse() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); List regions; - try(RegionLocator rl 
= util.getConnection().getRegionLocator(TEST_TABLE)) { + try (RegionLocator rl = util.getConnection().getRegionLocator(TEST_TABLE)) { regions = rl.getAllRegionLocations(); } @@ -271,28 +263,26 @@ public void testCoprocessorServiceNullResponse() throws Throwable { // scan: for all regions final RpcController controller = new ServerRpcController(); // test that null results are supported - Map results = - table.coprocessorService(TestRpcServiceProtos.TestProtobufRpcProto.class, - ROWS[0], ROWS[ROWS.length - 1], - new Batch.Call() { - public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) - throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.echo(controller, request, callback); - TestProtos.EchoResponseProto response = callback.get(); - LOG.debug("Batch.Call got result " + response); - return null; - } + Map results = table.coprocessorService( + TestRpcServiceProtos.TestProtobufRpcProto.class, ROWS[0], ROWS[ROWS.length - 1], + new Batch.Call() { + public String call(TestRpcServiceProtos.TestProtobufRpcProto instance) + throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback callback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.echo(controller, request, callback); + TestProtos.EchoResponseProto response = callback.get(); + LOG.debug("Batch.Call got result " + response); + return null; } - ); + }); for (Map.Entry e : results.entrySet()) { - LOG.info("Got value "+e.getValue()+" for region "+Bytes.toStringBinary(e.getKey())); + LOG.info("Got value " + e.getValue() + " for region " + Bytes.toStringBinary(e.getKey())); } assertEquals(3, results.size()); for (HRegionLocation region : regions) { HRegionInfo info = region.getRegionInfo(); - LOG.info("Region info is "+info.getRegionNameAsString()); + LOG.info("Region info is " + info.getRegionNameAsString()); assertTrue(results.containsKey(info.getRegionName())); assertNull(results.get(info.getRegionName())); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java index aba63e8f75eb..5d3f96c87433 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorEndpointTracing.java @@ -33,6 +33,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; + import com.google.protobuf.Descriptors; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; @@ -90,32 +91,32 @@ import org.junit.rules.TestRule; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.org.apache.commons.collections4.MapUtils; /** * Test cases to verify tracing coprocessor Endpoint execution */ -@Category({ CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorEndpointTracing { private static final Logger logger = - LoggerFactory.getLogger(TestCoprocessorEndpointTracing.class); + LoggerFactory.getLogger(TestCoprocessorEndpointTracing.class); @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCoprocessorEndpointTracing.class); + HBaseClassTestRule.forClass(TestCoprocessorEndpointTracing.class); private 
static final OpenTelemetryClassRule otelClassRule = OpenTelemetryClassRule.create(); - private static final MiniClusterRule miniclusterRule = MiniClusterRule.newBuilder() - .setConfiguration(() -> { - final Configuration conf = HBaseConfiguration.create(); - conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); - conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); - conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - ProtobufCoprocessorService.class.getName()); - return conf; - }) - .build(); + private static final MiniClusterRule miniclusterRule = + MiniClusterRule.newBuilder().setConfiguration(() -> { + final Configuration conf = HBaseConfiguration.create(); + conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000); + conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, + ProtobufCoprocessorService.class.getName()); + conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, + ProtobufCoprocessorService.class.getName()); + return conf; + }).build(); private static final ConnectionRule connectionRule = ConnectionRule.createConnectionRule( miniclusterRule::createConnection, miniclusterRule::createAsyncConnection); @@ -126,20 +127,18 @@ protected void before() throws Throwable { final AsyncConnection connection = connectionRule.getAsyncConnection(); final AsyncAdmin admin = connection.getAdmin(); final TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TEST_TABLE) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(TEST_FAMILY)).build(); admin.createTable(tableDescriptor).get(); util.waitUntilAllRegionsAssigned(TEST_TABLE); } } @ClassRule - public static final TestRule testRule = RuleChain.outerRule(otelClassRule) - .around(miniclusterRule) - .around(connectionRule) - .around(new Setup()); + public static final TestRule testRule = RuleChain.outerRule(otelClassRule).around(miniclusterRule) + .around(connectionRule).around(new Setup()); private static final TableName TEST_TABLE = - TableName.valueOf(TestCoprocessorEndpointTracing.class.getSimpleName()); + TableName.valueOf(TestCoprocessorEndpointTracing.class.getSimpleName()); private static final byte[] TEST_FAMILY = Bytes.toBytes("TestFamily"); @Rule @@ -155,42 +154,41 @@ public void traceAsyncTableEndpoint() { final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final CompletableFuture> future = new CompletableFuture<>(); final AsyncTable.CoprocessorCallback callback = - new AsyncTable.CoprocessorCallback() { - final ConcurrentMap results = new ConcurrentHashMap<>(); - - @Override - public void onRegionComplete(RegionInfo region, EchoResponseProto resp) { - if (!future.isDone()) { - results.put(region.getRegionName(), resp.getMessage()); + new AsyncTable.CoprocessorCallback() { + final ConcurrentMap results = new ConcurrentHashMap<>(); + + @Override + public void onRegionComplete(RegionInfo region, EchoResponseProto resp) { + if (!future.isDone()) { + results.put(region.getRegionName(), resp.getMessage()); + } } - } - @Override - public void onRegionError(RegionInfo region, Throwable error) { - if (!future.isDone()) { - future.completeExceptionally(error); + @Override + public void onRegionError(RegionInfo region, Throwable error) { + if (!future.isDone()) { + future.completeExceptionally(error); + } } - } - @Override - public void onComplete() { - if (!future.isDone()) { - future.complete(results); + @Override + public 
void onComplete() { + if (!future.isDone()) { + future.complete(results); + } } - } - @Override - public void onError(Throwable error) { - if (!future.isDone()) { - future.completeExceptionally(error); + @Override + public void onError(Throwable error) { + if (!future.isDone()) { + future.completeExceptionally(error); + } } - } - }; + }; final Map results = TraceUtil.trace(() -> { table.coprocessorService(TestProtobufRpcProto::newStub, - (stub, controller, cb) -> stub.echo(controller, request, cb), callback) - .execute(); + (stub, controller, cb) -> stub.echo(controller, request, cb), callback).execute(); try { return future.get(); } catch (InterruptedException | ExecutionException e) { @@ -199,31 +197,21 @@ public void onError(Throwable error) { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - equalTo("hello")))); + assertThat(results.values(), everyItem(allOf(notNullValue(), equalTo("hello")))); final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -234,48 +222,36 @@ public void traceSyncTableEndpointCall() throws Exception { final RpcController controller = new ServerRpcController(); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); final Map results = TraceUtil.trace(() -> { try { - return table.coprocessorService(TestProtobufRpcProto.class, null, null, - t -> { - t.echo(controller, request, callback); - return callback.get(); - }); + return table.coprocessorService(TestProtobufRpcProto.class, null, null, t -> { + t.echo(controller, request, callback); + return callback.get(); + }); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), 
hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -286,52 +262,39 @@ public void traceSyncTableEndpointCallAndCallback() throws Exception { final RpcController controller = new ServerRpcController(); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final CoprocessorRpcUtils.BlockingRpcCallback callback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); final ConcurrentMap results = new ConcurrentHashMap<>(); TraceUtil.trace(() -> { try { - table.coprocessorService(TestProtobufRpcProto.class, null, null, - t -> { - t.echo(controller, request, callback); - return callback.get(); - }, - (region, row, result) -> { - results.put(region, result); - }); + table.coprocessorService(TestProtobufRpcProto.class, null, null, t -> { + t.echo(controller, request, callback); + return callback.get(); + }, (region, row, result) -> { + results.put(region, result); + }); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + 
hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/ExecService"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/ExecService"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -344,7 +307,7 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { try { final CoprocessorRpcChannel channel = table.coprocessorService(new byte[] {}); final TestProtobufRpcProto.BlockingInterface service = - TestProtobufRpcProto.newBlockingStub(channel); + TestProtobufRpcProto.newBlockingStub(channel); return service.echo(null, request); } catch (Throwable t) { throw new RuntimeException(t); @@ -354,9 +317,7 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { assertEquals("hello", response.getMessage()); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); @@ -365,13 +326,10 @@ public void traceSyncTableRegionCoprocessorRpcChannel() throws Exception { * The Table instance isn't issuing a command here, it's not a table operation, so don't expect * there to be a span like `COPROC_EXEC table`. */ - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = + allOf(hasName(containsString("COPROC_EXEC")), hasParentSpanId(testSpan)); assertThat(spans, not(hasItem(tableOpMatcher))); } @@ -380,45 +338,34 @@ public void traceSyncTableBatchEndpoint() throws Exception { final Connection connection = connectionRule.getConnection(); try (final Table table = connection.getTable(TEST_TABLE)) { final Descriptors.MethodDescriptor descriptor = - TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); + TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final Map response = TraceUtil.trace(() -> { try { - return table.batchCoprocessorService( - descriptor, request, null, null, EchoResponseProto.getDefaultInstance()); + return table.batchCoprocessorService(descriptor, request, null, null, + EchoResponseProto.getDefaultInstance()); } catch (Throwable t) { throw new RuntimeException(t); } }, testName.getMethodName()); assertNotNull(response); - assertThat(response.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(response.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), 
hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/Multi"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.ClientService/Multi"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -427,7 +374,7 @@ public void traceSyncTableBatchEndpointCallback() throws Exception { final Connection connection = connectionRule.getConnection(); try (final Table table = connection.getTable(TEST_TABLE)) { final Descriptors.MethodDescriptor descriptor = - TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); + TestProtobufRpcProto.getDescriptor().findMethodByName("echo"); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final ConcurrentMap results = new ConcurrentHashMap<>(); TraceUtil.trace(() -> { @@ -440,34 +387,23 @@ public void traceSyncTableBatchEndpointCallback() throws Exception { }, testName.getMethodName()); assertNotNull(results); assertTrue("coprocessor call returned no results.", MapUtils.isNotEmpty(results)); - assertThat(results.values(), everyItem(allOf( - notNullValue(), - hasProperty("message", equalTo("hello"))))); + assertThat(results.values(), + everyItem(allOf(notNullValue(), hasProperty("message", equalTo("hello"))))); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher tableOpMatcher = allOf( - hasName(containsString("COPROC_EXEC")), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher tableOpMatcher = allOf(hasName(containsString("COPROC_EXEC")), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(tableOpMatcher)); - final SpanData tableOpSpan = spans.stream() - .filter(tableOpMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.ClientService/Multi"), - hasParentSpanId(tableOpSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData tableOpSpan = + spans.stream().filter(tableOpMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = 
allOf(hasName("hbase.pb.ClientService/Multi"), + hasParentSpanId(tableOpSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -477,29 +413,22 @@ public void traceAsyncAdminEndpoint() throws Exception { final AsyncAdmin admin = connection.getAdmin(); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final ServiceCaller callback = - (stub, controller, cb) -> stub.echo(controller, request, cb); + (stub, controller, cb) -> stub.echo(controller, request, cb); - final String response = TraceUtil.tracedFuture( - () -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), - testName.getMethodName()) - .get() - .getMessage(); + final String response = TraceUtil + .tracedFuture(() -> admin.coprocessorService(TestProtobufRpcProto::newStub, callback), + testName.getMethodName()) + .get().getMessage(); assertEquals("hello", response); - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } @@ -508,7 +437,7 @@ public void traceSyncAdminEndpoint() throws Exception { final Connection connection = connectionRule.getConnection(); try (final Admin admin = connection.getAdmin()) { final TestProtobufRpcProto.BlockingInterface service = - TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); + TestProtobufRpcProto.newBlockingStub(admin.coprocessorService()); final EchoRequestProto request = EchoRequestProto.newBuilder().setMessage("hello").build(); final String response = TraceUtil.trace(() -> { try { @@ -520,27 +449,21 @@ public void traceSyncAdminEndpoint() throws Exception { assertEquals("hello", response); } - final Matcher parentMatcher = allOf( - hasName(testName.getMethodName()), - hasEnded()); + final Matcher parentMatcher = allOf(hasName(testName.getMethodName()), hasEnded()); waitForAndLog(parentMatcher); final List spans = otelClassRule.getSpans(); - final SpanData testSpan = spans.stream() - .filter(parentMatcher::matches) - .findFirst() - .orElseThrow(AssertionError::new); - final Matcher rpcMatcher = allOf( - hasName("hbase.pb.MasterService/ExecMasterService"), - hasParentSpanId(testSpan), - hasStatusWithCode(StatusCode.OK)); + final SpanData testSpan = + spans.stream().filter(parentMatcher::matches).findFirst().orElseThrow(AssertionError::new); + final Matcher rpcMatcher = allOf(hasName("hbase.pb.MasterService/ExecMasterService"), + hasParentSpanId(testSpan), hasStatusWithCode(StatusCode.OK)); assertThat(spans, hasItem(rpcMatcher)); } private void waitForAndLog(Matcher spanMatcher) { final Configuration conf = connectionRule.getAsyncConnection().getConfiguration(); - Waiter.waitFor(conf, TimeUnit.SECONDS.toMillis(5), new MatcherPredicate<>( - otelClassRule::getSpans, hasItem(spanMatcher))); + Waiter.waitFor(conf, 
TimeUnit.SECONDS.toMillis(5), + new MatcherPredicate<>(otelClassRule::getSpans, hasItem(spanMatcher))); final List spans = otelClassRule.getSpans(); if (logger.isDebugEnabled()) { StringTraceRenderer renderer = new StringTraceRenderer(spans); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java index d1e848dbc620..9f82ee23b39d 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorServiceBackwardCompatibility.java @@ -40,7 +40,7 @@ /** * Tests to ensure that 2.0 is backward compatible in loading CoprocessorService. */ -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestCoprocessorServiceBackwardCompatibility { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -92,11 +92,11 @@ public static void setupBeforeClass() throws Exception { TEST_UTIL = new HBaseTestingUtility(); CONF = TEST_UTIL.getConfiguration(); CONF.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, - DummyCoprocessorService.class.getName()); + DummyCoprocessorService.class.getName()); CONF.setStrings(CoprocessorHost.REGIONSERVER_COPROCESSOR_CONF_KEY, - DummyCoprocessorService.class.getName()); + DummyCoprocessorService.class.getName()); CONF.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - DummyCoprocessorService.class.getName()); + DummyCoprocessorService.class.getName()); TEST_UTIL.startMiniCluster(); } @@ -108,21 +108,21 @@ public static void tearDownAfter() throws Exception { @Test public void testCoprocessorServiceLoadedByMaster() throws Throwable { TEST_UTIL.getAdmin().coprocessorService().callBlockingMethod( - DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, - DummyRequest.newBuilder().setValue(MASTER).build(), DummyResponse.getDefaultInstance()); + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, + DummyRequest.newBuilder().setValue(MASTER).build(), DummyResponse.getDefaultInstance()); assertEquals(MASTER, DummyCoprocessorService.numMaster); - TEST_UTIL.getAdmin().coprocessorService( - TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName()).callBlockingMethod( - DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), null, - DummyRequest.newBuilder().setValue(REGIONSERVER).build(), - DummyResponse.getDefaultInstance()); + TEST_UTIL.getAdmin() + .coprocessorService(TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName()) + .callBlockingMethod(DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), + null, DummyRequest.newBuilder().setValue(REGIONSERVER).build(), + DummyResponse.getDefaultInstance()); assertEquals(REGIONSERVER, DummyCoprocessorService.numRegionServer); TEST_UTIL.getConnection().getTable(TableName.valueOf("hbase:meta")).batchCoprocessorService( - DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), - DummyRequest.newBuilder().setValue(REGION).build(), Bytes.toBytes(""), Bytes.toBytes(""), - DummyResponse.getDefaultInstance()); + DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), + DummyRequest.newBuilder().setValue(REGION).build(), Bytes.toBytes(""), Bytes.toBytes(""), + DummyResponse.getDefaultInstance()); assertEquals(REGION, DummyCoprocessorService.numRegion); } } 
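
For context on the TestCoprocessorServiceBackwardCompatibility hunks above: the test drives a coprocessor endpoint through the raw protobuf blocking channel returned by Admin.coprocessorService(), handing callBlockingMethod the method descriptor, the request message, and a response prototype instead of going through a generated stub. The sketch below shows that calling pattern in isolation; it is an illustration, not an addition to the patch, and it assumes hbase-client plus the non-shaded protobuf-java on the classpath (the generated Dummy* test protos referenced in the hunk are supplied by the caller).

import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;

public final class CoprocessorBlockingCallSketch {

  private CoprocessorBlockingCallSketch() {
  }

  /**
   * Invoke a named method on a master-hosted coprocessor endpoint and return the raw
   * protobuf response. The caller supplies the generated request message and a response
   * prototype, exactly as the test above does with DummyRequest / DummyResponse.
   */
  public static Message callMasterEndpoint(Admin admin, Descriptors.MethodDescriptor method,
      Message request, Message responsePrototype) throws ServiceException {
    // Admin#coprocessorService() returns a CoprocessorRpcChannel targeting the active master;
    // the channel implements protobuf's BlockingRpcChannel, hence callBlockingMethod.
    CoprocessorRpcChannel channel = admin.coprocessorService();
    // Signature: callBlockingMethod(methodDescriptor, controller, request, responsePrototype).
    // The test passes a null controller, so this sketch does the same.
    return channel.callBlockingMethod(method, null, request, responsePrototype);
  }
}

In the test this is invoked with DummyCoprocessorService.getDescriptor().findMethodByName("dummyCall"), a DummyRequest carrying the expected counter value, and DummyResponse.getDefaultInstance(); the region-server variant only swaps in admin.coprocessorService(serverName) as the channel source, and the region-level variant reaches the same service through Table.batchCoprocessorService.
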
diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java index 3bec2034a129..740c4742cde9 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestCoprocessorTableEndpoint.java @@ -45,7 +45,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestCoprocessorTableEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -109,19 +109,18 @@ private static byte[][] makeN(byte[] base, int n) { return ret; } - private static Map sum(final Table table, final byte [] family, - final byte [] qualifier, final byte [] start, final byte [] end) + private static Map sum(final Table table, final byte[] family, + final byte[] qualifier, final byte[] start, final byte[] end) throws ServiceException, Throwable { - return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, - start, end, - new Batch.Call() { + return table.coprocessorService(ColumnAggregationProtos.ColumnAggregationService.class, start, + end, new Batch.Call() { @Override public Long call(ColumnAggregationProtos.ColumnAggregationService instance) - throws IOException { + throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); ColumnAggregationProtos.SumRequest.Builder builder = - ColumnAggregationProtos.SumRequest.newBuilder(); + ColumnAggregationProtos.SumRequest.newBuilder(); builder.setFamily(ByteString.copyFrom(family)); if (qualifier != null && qualifier.length > 0) { builder.setQualifier(ByteString.copyFrom(qualifier)); @@ -134,7 +133,7 @@ public Long call(ColumnAggregationProtos.ColumnAggregationService instance) private static final void createTable(HTableDescriptor desc) throws Exception { Admin admin = TEST_UTIL.getAdmin(); - admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]}); + admin.createTable(desc, new byte[][] { ROWS[rowSeperator1], ROWS[rowSeperator2] }); TEST_UTIL.waitUntilAllRegionsAssigned(desc.getTableName()); Table table = TEST_UTIL.getConnection().getTable(desc.getTableName()); try { @@ -158,8 +157,8 @@ private static void updateTable(HTableDescriptor desc) throws Exception { private static final void verifyTable(TableName tableName) throws Throwable { Table table = TEST_UTIL.getConnection().getTable(tableName); try { - Map results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], - ROWS[ROWS.length-1]); + Map results = + sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[0], ROWS[ROWS.length - 1]); int sumResult = 0; int expectedResult = 0; for (Map.Entry e : results.entrySet()) { @@ -172,7 +171,7 @@ private static final void verifyTable(TableName tableName) throws Throwable { // scan: for region 2 and region 3 results.clear(); - results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length-1]); + results = sum(table, TEST_FAMILY, TEST_QUALIFIER, ROWS[rowSeperator1], ROWS[ROWS.length - 1]); sumResult = 0; expectedResult = 0; for (Map.Entry e : results.entrySet()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java index 8a4c7b21b553..007bcf64c3b5 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestImportExport extends org.apache.hadoop.hbase.mapreduce.TestImportExport { @ClassRule diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java index f1808845d0a5..1faa5342c119 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRegionServerCoprocessorEndpoint.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRegionServerCoprocessorEndpoint { @ClassRule @@ -75,13 +75,13 @@ public static void tearDownAfterClass() throws Exception { public void testEndpoint() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, TEST_UTIL.getAdmin().coprocessorService(serverName)); - service.dummyCall(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + service.dummyCall(controller, DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), + rpcCallback); assertEquals(DUMMY_VALUE, rpcCallback.get().getValue()); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -92,17 +92,17 @@ public void testEndpoint() throws Exception { public void testEndpointExceptions() throws Exception { final ServerName serverName = TEST_UTIL.getHBaseCluster().getRegionServer(0).getServerName(); final ServerRpcController controller = new ServerRpcController(); - final CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); + final CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); DummyRegionServerEndpointProtos.DummyService service = ProtobufUtil.newServiceStub(DummyRegionServerEndpointProtos.DummyService.class, - TEST_UTIL.getAdmin().coprocessorService(serverName)); + TEST_UTIL.getAdmin().coprocessorService(serverName)); 
service.dummyThrow(controller, - DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); + DummyRegionServerEndpointProtos.DummyRequest.getDefaultInstance(), rpcCallback); assertEquals(null, rpcCallback.get()); assertTrue(controller.failedOnException()); assertEquals(WHAT_TO_THROW.getClass().getName().trim(), - ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim()); + ((RemoteWithExtrasException) controller.getFailedOn().getCause()).getClassName().trim()); } public static class DummyRegionServerEndpoint extends DummyService @@ -120,8 +120,7 @@ public void dummyCall(RpcController controller, DummyRequest request, } @Override - public void dummyThrow(RpcController controller, - DummyRequest request, + public void dummyThrow(RpcController controller, DummyRequest request, RpcCallback done) { CoprocessorRpcUtils.setControllerException(controller, WHAT_TO_THROW); } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java index e788e5d11f5d..37e098d31d43 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestRowProcessorEndpoint.java @@ -79,10 +79,10 @@ import org.slf4j.LoggerFactory; /** - * Verifies ProcessEndpoint works. - * The tested RowProcessor performs two scans and a read-modify-write. + * Verifies ProcessEndpoint works. The tested RowProcessor performs two scans and a + * read-modify-write. */ -@Category({CoprocessorTests.class, MediumTests.class}) +@Category({ CoprocessorTests.class, MediumTests.class }) public class TestRowProcessorEndpoint { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -120,7 +120,7 @@ public class TestRowProcessorEndpoint { public static void setupBeforeClass() throws Exception { Configuration conf = util.getConfiguration(); conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - RowProcessorEndpoint.class.getName()); + RowProcessorEndpoint.class.getName()); conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2); conf.setLong("hbase.hregion.row.processor.timeout", 1000L); conf.setLong(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 2048); @@ -142,9 +142,9 @@ public void prepareTestData() throws Exception { table = util.createTable(TABLE, FAM); { Put put = new Put(ROW); - put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A + put.addColumn(FAM, A, Bytes.add(B, C)); // B, C are friends of A put.addColumn(FAM, B, Bytes.add(D, E, F)); // D, E, F are friends of B - put.addColumn(FAM, C, G); // G is a friend of C + put.addColumn(FAM, C, G); // G is a friend of C table.put(put); rowSize = put.size(); } @@ -162,15 +162,14 @@ public void testDoubleScan() throws Throwable { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.FriendsOfFriendsProcessor processor = new RowProcessorEndpoint.FriendsOfFriendsProcessor(ROW, A); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); ProcessResponse protoResult = service.process(null, request); FriendsOfFriendsProcessorResponse response = FriendsOfFriendsProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); Set 
result = new HashSet<>(); result.addAll(response.getResultList()); - Set expected = new HashSet<>(Arrays.asList(new String[]{"d", "e", "f", "g"})); + Set expected = new HashSet<>(Arrays.asList(new String[] { "d", "e", "f", "g" })); Get get = new Get(ROW); LOG.debug("row keyvalues:" + stringifyKvs(table.get(get).listCells())); assertEquals(expected, result); @@ -208,12 +207,11 @@ private int incrementCounter(Table table) throws Throwable { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.IncrementCounterProcessor processor = new RowProcessorEndpoint.IncrementCounterProcessor(ROW); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); ProcessResponse protoResult = service.process(null, request); - IncCounterProcessorResponse response = IncCounterProcessorResponse - .parseFrom(protoResult.getRowProcessorResult()); + IncCounterProcessorResponse response = + IncCounterProcessorResponse.parseFrom(protoResult.getRowProcessorResult()); Integer result = response.getResponse(); return result; } @@ -246,10 +244,8 @@ public void testMultipleRows() throws Throwable { failures.set(0); int numThreads = 100; concurrentExec(new SwapRowsRunner(), numThreads); - LOG.debug("row keyvalues:" + - stringifyKvs(table.get(new Get(ROW)).listCells())); - LOG.debug("row2 keyvalues:" + - stringifyKvs(table.get(new Get(ROW2)).listCells())); + LOG.debug("row keyvalues:" + stringifyKvs(table.get(new Get(ROW)).listCells())); + LOG.debug("row2 keyvalues:" + stringifyKvs(table.get(new Get(ROW2)).listCells())); int failureNumber = failures.get(); if (failureNumber > 0) { LOG.debug("We failed " + failureNumber + " times during test"); @@ -279,8 +275,7 @@ private void swapRows(Table table) throws Throwable { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.RowSwapProcessor processor = new RowProcessorEndpoint.RowSwapProcessor(ROW, ROW2); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); service.process(null, request); } @@ -291,8 +286,7 @@ public void testTimeout() throws Throwable { CoprocessorRpcChannel channel = table.coprocessorService(ROW); RowProcessorEndpoint.TimeoutProcessor processor = new RowProcessorEndpoint.TimeoutProcessor(ROW); - RowProcessorService.BlockingInterface service = - RowProcessorService.newBlockingStub(channel); + RowProcessorService.BlockingInterface service = RowProcessorService.newBlockingStub(channel); ProcessRequest request = RowProcessorClient.getRowProcessorPB(processor); boolean exceptionCaught = false; try { @@ -304,17 +298,14 @@ public void testTimeout() throws Throwable { } /** - * This class defines two RowProcessors: - * IncrementCounterProcessor and FriendsOfFriendsProcessor. - * - * We define the RowProcessors as the inner class of the endpoint. - * So they can be loaded with the endpoint on the coprocessor. + * This class defines two RowProcessors: IncrementCounterProcessor and FriendsOfFriendsProcessor. + * We define the RowProcessors as the inner class of the endpoint. So they can be loaded with the + * endpoint on the coprocessor. 
*/ - public static class RowProcessorEndpoint - extends BaseRowProcessorEndpoint { + public static class RowProcessorEndpoint + extends BaseRowProcessorEndpoint { public static class IncrementCounterProcessor extends - BaseRowProcessor { + BaseRowProcessor { int counter = 0; byte[] row = new byte[0]; @@ -346,15 +337,14 @@ public boolean readOnly() { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { // Scan current counter List kvs = new ArrayList<>(); Scan scan = new Scan(row, row); scan.addColumn(FAM, COUNTER); doScan(region, scan, kvs); - counter = kvs.isEmpty() ? 0 : - Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next())); + counter = kvs.isEmpty() ? 0 : Bytes.toInt(CellUtil.cloneValue(kvs.iterator().next())); // Assert counter value assertEquals(expectedCounter, counter); @@ -363,19 +353,15 @@ public void process(long now, HRegion region, counter += 1; expectedCounter += 1; - Put p = new Put(row); - KeyValue kv = - new KeyValue(row, FAM, COUNTER, now, Bytes.toBytes(counter)); + KeyValue kv = new KeyValue(row, FAM, COUNTER, now, Bytes.toBytes(counter)); p.add(kv); mutations.add(p); walEdit.add(kv); // We can also inject some meta data to the walEdit - KeyValue metaKv = new KeyValue( - row, WALEdit.METAFAMILY, - Bytes.toBytes("I just increment counter"), - Bytes.toBytes(counter)); + KeyValue metaKv = new KeyValue(row, WALEdit.METAFAMILY, + Bytes.toBytes("I just increment counter"), Bytes.toBytes(counter)); walEdit.add(metaKv); } @@ -395,7 +381,7 @@ public void initialize(IncCounterProcessorRequest msg) { } public static class FriendsOfFriendsProcessor extends - BaseRowProcessor { + BaseRowProcessor { byte[] row = null; byte[] person = null; final Set result = new HashSet<>(); @@ -430,8 +416,8 @@ public boolean readOnly() { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { List kvs = new ArrayList<>(); { // First scan to get friends of the person Scan scan = new Scan(row, row); @@ -444,7 +430,7 @@ public void process(long now, HRegion region, for (Cell kv : kvs) { byte[] friends = CellUtil.cloneValue(kv); for (byte f : friends) { - scan.addColumn(FAM, new byte[]{f}); + scan.addColumn(FAM, new byte[] { f }); } } doScan(region, scan, kvs); @@ -453,7 +439,7 @@ public void process(long now, HRegion region, result.clear(); for (Cell kv : kvs) { for (byte b : CellUtil.cloneValue(kv)) { - result.add((char)b + ""); + result.add((char) b + ""); } } } @@ -470,8 +456,7 @@ public FriendsOfFriendsProcessorRequest getRequestData() throws IOException { } @Override - public void initialize(FriendsOfFriendsProcessorRequest request) - throws IOException { + public void initialize(FriendsOfFriendsProcessorRequest request) throws IOException { this.person = request.getPerson().toByteArray(); this.row = request.getRow().toByteArray(); result.clear(); @@ -479,8 +464,8 @@ public void initialize(FriendsOfFriendsProcessorRequest request) } } - public static class RowSwapProcessor extends - BaseRowProcessor { + public static class RowSwapProcessor + extends BaseRowProcessor { byte[] row1 = new byte[0]; byte[] row2 = new byte[0]; @@ -514,8 +499,8 @@ public RowSwapProcessorResponse getResult() { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit 
walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { // Override the time to avoid race-condition in the unit test caused by // inacurate timer on some machines @@ -541,19 +526,17 @@ public void process(long now, HRegion region, List> kvs = new ArrayList<>(2); kvs.add(kvs1); kvs.add(kvs2); - byte[][] rows = new byte[][]{row1, row2}; + byte[][] rows = new byte[][] { row1, row2 }; for (int i = 0; i < kvs.size(); ++i) { for (Cell kv : kvs.get(i)) { // Delete from the current row and add to the other row Delete d = new Delete(rows[i]); - KeyValue kvDelete = - new KeyValue(rows[i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - kv.getTimestamp(), KeyValue.Type.Delete); + KeyValue kvDelete = new KeyValue(rows[i], CellUtil.cloneFamily(kv), + CellUtil.cloneQualifier(kv), kv.getTimestamp(), KeyValue.Type.Delete); d.add(kvDelete); Put p = new Put(rows[1 - i]); - KeyValue kvAdd = - new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), - now, CellUtil.cloneValue(kv)); + KeyValue kvAdd = new KeyValue(rows[1 - i], CellUtil.cloneFamily(kv), + CellUtil.cloneQualifier(kv), now, CellUtil.cloneValue(kv)); p.add(kvAdd); mutations.add(d); walEdit.add(kvDelete); @@ -583,8 +566,8 @@ public void initialize(RowSwapProcessorRequest msg) { } } - public static class TimeoutProcessor extends - BaseRowProcessor { + public static class TimeoutProcessor + extends BaseRowProcessor { byte[] row = new byte[0]; /** @@ -607,8 +590,8 @@ public TimeoutProcessorResponse getResult() { } @Override - public void process(long now, HRegion region, - List mutations, WALEdit walEdit) throws IOException { + public void process(long now, HRegion region, List mutations, WALEdit walEdit) + throws IOException { try { // Sleep for a long time so it timeout Thread.sleep(100 * 1000L); @@ -663,11 +646,9 @@ static String stringifyKvs(Collection kvs) { byte[] col = CellUtil.cloneQualifier(kv); byte[] val = CellUtil.cloneValue(kv); if (Bytes.equals(col, COUNTER)) { - out.append(Bytes.toStringBinary(col) + ":" + - Bytes.toInt(val) + " "); + out.append(Bytes.toStringBinary(col) + ":" + Bytes.toInt(val) + " "); } else { - out.append(Bytes.toStringBinary(col) + ":" + - Bytes.toStringBinary(val) + " "); + out.append(Bytes.toStringBinary(col) + ":" + Bytes.toStringBinary(val) + " "); } } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java index 08e127fdab9c..66f07149d220 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/coprocessor/TestSecureExport.java @@ -82,7 +82,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestSecureExport { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -123,25 +123,20 @@ public class TestSecureExport { private static final String TOPSECRET = "topsecret"; @Rule public final TestName name = new TestName(); + private static void setUpKdcServer() throws Exception { KDC = UTIL.setupMiniKdc(KEYTAB_FILE); USERNAME = UserGroupInformation.getLoginUser().getShortUserName(); SERVER_PRINCIPAL = USERNAME + "/" + LOCALHOST; HTTP_PRINCIPAL = "HTTP/" + LOCALHOST; - KDC.createPrincipal(KEYTAB_FILE, - SERVER_PRINCIPAL, - HTTP_PRINCIPAL, - USER_ADMIN + "/" + LOCALHOST, - 
USER_OWNER + "/" + LOCALHOST, - USER_RX + "/" + LOCALHOST, - USER_RO + "/" + LOCALHOST, - USER_XO + "/" + LOCALHOST, - USER_NONE + "/" + LOCALHOST); + KDC.createPrincipal(KEYTAB_FILE, SERVER_PRINCIPAL, HTTP_PRINCIPAL, USER_ADMIN + "/" + LOCALHOST, + USER_OWNER + "/" + LOCALHOST, USER_RX + "/" + LOCALHOST, USER_RO + "/" + LOCALHOST, + USER_XO + "/" + LOCALHOST, USER_NONE + "/" + LOCALHOST); } private static User getUserByLogin(final String user) throws IOException { - return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI( - getPrinciple(user), KEYTAB_FILE.getAbsolutePath())); + return User.create(UserGroupInformation.loginUserFromKeytabAndReturnUGI(getPrinciple(user), + KEYTAB_FILE.getAbsolutePath())); } private static String getPrinciple(final String user) { @@ -150,28 +145,27 @@ private static String getPrinciple(final String user) { private static void setUpClusterKdc() throws Exception { HBaseKerberosUtils.setSecuredConfiguration(UTIL.getConfiguration(), - SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); + SERVER_PRINCIPAL + "@" + KDC.getRealm(), HTTP_PRINCIPAL + "@" + KDC.getRealm()); HBaseKerberosUtils.setSSLConfiguration(UTIL, TestSecureExport.class); UTIL.getConfiguration().set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, - UTIL.getConfiguration().get( - CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + Export.class.getName()); + UTIL.getConfiguration().get(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY) + "," + + Export.class.getName()); } private static void addLabels(final Configuration conf, final List users, final List labels) throws Exception { - PrivilegedExceptionAction action - = () -> { - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); - for (String user : users) { - VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); - } - } catch (Throwable t) { - throw new IOException(t); + PrivilegedExceptionAction action = () -> { + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels.toArray(new String[labels.size()])); + for (String user : users) { + VisibilityClient.setAuths(conn, labels.toArray(new String[labels.size()]), user); } - return null; - }; + } catch (Throwable t) { + throw new IOException(t); + } + return null; + }; getUserByLogin(USER_ADMIN).runAs(action); } @@ -197,7 +191,7 @@ private static void clearOutput(Path path) throws IOException { @BeforeClass public static void beforeClass() throws Exception { UserProvider.setUserProviderForTesting(UTIL.getConfiguration(), - HadoopSecurityEnabledUserProviderForTesting.class); + HadoopSecurityEnabledUserProviderForTesting.class); setUpKdcServer(); SecureTestUtil.enableSecurity(UTIL.getConfiguration()); UTIL.getConfiguration().setBoolean(AccessControlConstants.EXEC_PERMISSION_CHECKS_KEY, true); @@ -209,14 +203,10 @@ public static void beforeClass() throws Exception { UTIL.waitUntilAllRegionsAssigned(VisibilityConstants.LABELS_TABLE_NAME); UTIL.waitTableEnabled(PermissionStorage.ACL_TABLE_NAME, 50000); UTIL.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME, 50000); - SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, - Permission.Action.ADMIN, - Permission.Action.CREATE, - Permission.Action.EXEC, - Permission.Action.READ, - Permission.Action.WRITE); + SecureTestUtil.grantGlobal(UTIL, USER_ADMIN, Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action.EXEC, Permission.Action.READ, 
Permission.Action.WRITE); addLabels(UTIL.getConfiguration(), Arrays.asList(USER_OWNER), - Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); + Arrays.asList(PRIVATE, CONFIDENTIAL, SECRET, TOPSECRET)); } @AfterClass @@ -228,28 +218,23 @@ public static void afterClass() throws Exception { } /** - * Test the ExportEndpoint's access levels. The {@link Export} test is ignored - * since the access exceptions cannot be collected from the mappers. + * Test the ExportEndpoint's access levels. The {@link Export} test is ignored since the access + * exceptions cannot be collected from the mappers. */ @Test public void testAccessCase() throws Throwable { final String exportTable = name.getMethodName(); - TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) + TableDescriptor exportHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); - SecureTestUtil.grantOnTable(UTIL, USER_RO, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ); - SecureTestUtil.grantOnTable(UTIL, USER_RX, - TableName.valueOf(exportTable), null, null, - Permission.Action.READ, - Permission.Action.EXEC); - SecureTestUtil.grantOnTable(UTIL, USER_XO, - TableName.valueOf(exportTable), null, null, - Permission.Action.EXEC); + SecureTestUtil.createTable(UTIL, exportHtd, new byte[][] { Bytes.toBytes("s") }); + SecureTestUtil.grantOnTable(UTIL, USER_RO, TableName.valueOf(exportTable), null, null, + Permission.Action.READ); + SecureTestUtil.grantOnTable(UTIL, USER_RX, TableName.valueOf(exportTable), null, null, + Permission.Action.READ, Permission.Action.EXEC); + SecureTestUtil.grantOnTable(UTIL, USER_XO, TableName.valueOf(exportTable), null, null, + Permission.Action.EXEC); assertEquals(4, PermissionStorage .getTablePermissions(UTIL.getConfiguration(), TableName.valueOf(exportTable)).size()); AccessTestAction putAction = () -> { @@ -257,20 +242,15 @@ public void testAccessCase() throws Throwable { p.addColumn(FAMILYA, Bytes.toBytes("qual_0"), NOW, QUAL); p.addColumn(FAMILYA, Bytes.toBytes("qual_1"), NOW, QUAL); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p); } return null; }; // no hdfs access. 
- SecureTestUtil.verifyAllowed(putAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER)); - SecureTestUtil.verifyDenied(putAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), - getUserByLogin(USER_RX), - getUserByLogin(USER_NONE)); + SecureTestUtil.verifyAllowed(putAction, getUserByLogin(USER_ADMIN), getUserByLogin(USER_OWNER)); + SecureTestUtil.verifyDenied(putAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), + getUserByLogin(USER_RX), getUserByLogin(USER_NONE)); final FileSystem fs = UTIL.getDFSCluster().getFileSystem(); final Path openDir = fs.makeQualified(new Path("testAccessCase")); @@ -279,9 +259,9 @@ public void testAccessCase() throws Throwable { final Path output = fs.makeQualified(new Path(openDir, "output")); AccessTestAction exportAction = () -> { try { - String[] args = new String[]{exportTable, output.toString()}; - Map result - = Export.run(new Configuration(UTIL.getConfiguration()), args); + String[] args = new String[] { exportTable, output.toString() }; + Map result = + Export.run(new Configuration(UTIL.getConfiguration()), args); long rowCount = 0; long cellCount = 0; for (Export.Response r : result.values()) { @@ -305,7 +285,7 @@ public void testAccessCase() throws Throwable { assertEquals("Unexpected file owner", currentUserName, outputDirFileStatus.getOwner()); FileStatus[] outputFileStatus = fs.listStatus(new Path(openDir, "output")); - for (FileStatus fileStatus: outputFileStatus) { + for (FileStatus fileStatus : outputFileStatus) { assertEquals("Unexpected file owner", currentUserName, fileStatus.getOwner()); } } else { @@ -315,14 +295,10 @@ public void testAccessCase() throws Throwable { clearOutput(output); } }; - SecureTestUtil.verifyDenied(exportAction, - getUserByLogin(USER_RO), - getUserByLogin(USER_XO), + SecureTestUtil.verifyDenied(exportAction, getUserByLogin(USER_RO), getUserByLogin(USER_XO), getUserByLogin(USER_NONE)); - SecureTestUtil.verifyAllowed(exportAction, - getUserByLogin(USER_ADMIN), - getUserByLogin(USER_OWNER), - getUserByLogin(USER_RX)); + SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_ADMIN), + getUserByLogin(USER_OWNER), getUserByLogin(USER_RX)); AccessTestAction deleteAction = () -> { UTIL.deleteTable(TableName.valueOf(exportTable)); return null; @@ -336,12 +312,11 @@ public void testAccessCase() throws Throwable { public void testVisibilityLabels() throws IOException, Throwable { final String exportTable = name.getMethodName() + "_export"; final String importTable = name.getMethodName() + "_import"; - final TableDescriptor exportHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(exportTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)) - .setOwnerString(USER_OWNER) + final TableDescriptor exportHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(exportTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYA)).setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, exportHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, exportHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction putAction = () -> { Put p1 = new Put(ROW1); p1.addColumn(FAMILYA, QUAL, NOW, QUAL); @@ -353,7 +328,7 @@ public void testVisibilityLabels() throws IOException, Throwable { p3.addColumn(FAMILYA, QUAL, NOW, QUAL); p3.setCellVisibility(new CellVisibility("!" 
+ CONFIDENTIAL + " & " + TOPSECRET)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table t = conn.getTable(TableName.valueOf(exportTable))) { + Table t = conn.getTable(TableName.valueOf(exportTable))) { t.put(p1); t.put(p2); t.put(p3); @@ -370,7 +345,7 @@ public void testVisibilityLabels() throws IOException, Throwable { for (final Pair, Integer> labelsAndRowCount : labelsAndRowCounts) { final List labels = labelsAndRowCount.getFirst(); final int rowCount = labelsAndRowCount.getSecond(); - //create a open permission directory. + // create a open permission directory. final Path openDir = new Path("testAccessCase"); final FileSystem fs = openDir.getFileSystem(UTIL.getConfiguration()); fs.mkdirs(openDir); @@ -381,10 +356,9 @@ public void testVisibilityLabels() throws IOException, Throwable { labels.forEach(v -> buf.append(v).append(",")); buf.deleteCharAt(buf.length() - 1); try { - String[] args = new String[]{ - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), - exportTable, - output.toString(),}; + String[] args = + new String[] { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + buf.toString(), + exportTable, output.toString(), }; Export.run(new Configuration(UTIL.getConfiguration()), args); return null; } catch (ServiceException | IOException ex) { @@ -394,20 +368,17 @@ public void testVisibilityLabels() throws IOException, Throwable { } }; SecureTestUtil.verifyAllowed(exportAction, getUserByLogin(USER_OWNER)); - final TableDescriptor importHtd = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)) - .setOwnerString(USER_OWNER) + final TableDescriptor importHtd = + TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILYB)).setOwnerString(USER_OWNER) .build(); - SecureTestUtil.createTable(UTIL, importHtd, new byte[][]{Bytes.toBytes("s")}); + SecureTestUtil.createTable(UTIL, importHtd, new byte[][] { Bytes.toBytes("s") }); AccessTestAction importAction = () -> { - String[] args = new String[]{ - "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, - importTable, - output.toString() - }; - assertEquals(0, ToolRunner.run( - new Configuration(UTIL.getConfiguration()), new Import(), args)); + String[] args = new String[] { + "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, importTable, + output.toString() }; + assertEquals(0, + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new Import(), args)); return null; }; SecureTestUtil.verifyAllowed(importAction, getUserByLogin(USER_OWNER)); @@ -415,8 +386,8 @@ public void testVisibilityLabels() throws IOException, Throwable { Scan scan = new Scan(); scan.setAuthorizations(new Authorizations(labels)); try (Connection conn = ConnectionFactory.createConnection(UTIL.getConfiguration()); - Table table = conn.getTable(importHtd.getTableName()); - ResultScanner scanner = table.getScanner(scan)) { + Table table = conn.getTable(importHtd.getTableName()); + ResultScanner scanner = table.getScanner(scan)) { int count = 0; for (Result r : scanner) { ++count; diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java index 170a303845be..448df4940f8b 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java +++ 
b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/ipc/TestCoprocessorRpcUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java index 0d15f93d9f5f..0981dd226b2d 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/SecureBulkLoadEndpointClient.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Table; @@ -65,8 +64,7 @@ public String prepareBulkLoad(final TableName tableName) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PrepareBulkLoadRequest request = - PrepareBulkLoadRequest.newBuilder() + PrepareBulkLoadRequest request = PrepareBulkLoadRequest.newBuilder() .setTableName(ProtobufUtil.toProtoTableName(tableName)).build(); instance.prepareBulkLoad(controller, request, rpcCallback); @@ -94,12 +92,9 @@ public void cleanupBulkLoad(final String bulkToken) throws IOException { new CoprocessorRpcUtils.BlockingRpcCallback<>(); CleanupBulkLoadRequest request = - CleanupBulkLoadRequest.newBuilder() - .setBulkToken(bulkToken).build(); + CleanupBulkLoadRequest.newBuilder().setBulkToken(bulkToken).build(); - instance.cleanupBulkLoad(controller, - request, - rpcCallback); + instance.cleanupBulkLoad(controller, request, rpcCallback); if (controller.failedOnException()) { throw controller.getFailedOn(); @@ -110,8 +105,7 @@ public void cleanupBulkLoad(final String bulkToken) throws IOException { } public boolean bulkLoadHFiles(final List> familyPaths, - final Token userToken, final String bulkToken, final byte[] startRow) - throws IOException { + final Token userToken, final String bulkToken, final byte[] startRow) throws IOException { // we never want to send a batch of HFiles to all regions, thus cannot call // HTable#coprocessorService methods that take start and end rowkeys; see HBASE-9639 try { @@ -119,37 +113,30 @@ public boolean bulkLoadHFiles(final List> familyPaths, SecureBulkLoadProtos.SecureBulkLoadService instance = ProtobufUtil.newServiceStub(SecureBulkLoadProtos.SecureBulkLoadService.class, channel); - DelegationToken protoDT = - DelegationToken.newBuilder().build(); - if(userToken != null) { + DelegationToken protoDT = DelegationToken.newBuilder().build(); + if (userToken != null) { protoDT = - DelegationToken.newBuilder() - .setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) - .setPassword(ByteStringer.wrap(userToken.getPassword())) - .setKind(userToken.getKind().toString()) - .setService(userToken.getService().toString()).build(); + DelegationToken.newBuilder().setIdentifier(ByteStringer.wrap(userToken.getIdentifier())) + .setPassword(ByteStringer.wrap(userToken.getPassword())) + .setKind(userToken.getKind().toString()) + .setService(userToken.getService().toString()).build(); } List protoFamilyPaths = new ArrayList<>(familyPaths.size()); - for(Pair el: familyPaths) { + for (Pair el : familyPaths) { 
protoFamilyPaths.add(ClientProtos.BulkLoadHFileRequest.FamilyPath.newBuilder() - .setFamily(ByteStringer.wrap(el.getFirst())) - .setPath(el.getSecond()).build()); + .setFamily(ByteStringer.wrap(el.getFirst())).setPath(el.getSecond()).build()); } SecureBulkLoadProtos.SecureBulkLoadHFilesRequest request = - SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder() - .setFsToken(protoDT) - .addAllFamilyPath(protoFamilyPaths) - .setBulkToken(bulkToken).build(); + SecureBulkLoadProtos.SecureBulkLoadHFilesRequest.newBuilder().setFsToken(protoDT) + .addAllFamilyPath(protoFamilyPaths).setBulkToken(bulkToken).build(); ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback - rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - instance.secureBulkLoadHFiles(controller, - request, - rpcCallback); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); + instance.secureBulkLoadHFiles(controller, request, rpcCallback); SecureBulkLoadProtos.SecureBulkLoadHFilesResponse response = rpcCallback.get(); if (controller.failedOnException()) { diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java index 49697b831509..a4e84c15c556 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestHRegionServerBulkLoadWithOldSecureEndpoint.java @@ -59,7 +59,7 @@ * removed when old non-secure client for backward compatibility is not supported. */ @RunWith(Parameterized.class) -@Category({RegionServerTests.class, LargeTests.class}) +@Category({ RegionServerTests.class, LargeTests.class }) @Ignore // BROKEN. FIX OR REMOVE. 
public class TestHRegionServerBulkLoadWithOldSecureEndpoint extends TestHRegionServerBulkLoad { @ClassRule @@ -86,15 +86,14 @@ public static class AtomicHFileLoader extends RepeatingTestThread { private TableName tableName; public AtomicHFileLoader(TableName tableName, TestContext ctx, byte[][] targetFamilies) - throws IOException { + throws IOException { super(ctx); this.tableName = tableName; } public void doAnAction() throws Exception { long iteration = numBulkLoads.getAndIncrement(); - Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", - iteration)); + Path dir = UTIL.getDataTestDirOnTestFS(String.format("bulkLoad_%08d", iteration)); // create HFiles for different column families FileSystem fs = UTIL.getTestFileSystem(); @@ -112,20 +111,19 @@ public void doAnAction() throws Exception { Table table = conn.getTable(tableName); final String bulkToken = new SecureBulkLoadEndpointClient(table).prepareBulkLoad(tableName); RpcControllerFactory rpcControllerFactory = new RpcControllerFactory(UTIL.getConfiguration()); - ClientServiceCallable callable = - new ClientServiceCallable(conn, tableName, Bytes.toBytes("aaa"), - rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { - @Override - protected Void rpcCall() throws Exception { - LOG.debug("Going to connect to server " + getLocation() + " for row " + - Bytes.toStringBinary(getRow())); - try (Table table = conn.getTable(getTableName())) { - boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, - null, bulkToken, getLocation().getRegionInfo().getStartKey()); - } - return null; + ClientServiceCallable callable = new ClientServiceCallable(conn, tableName, + Bytes.toBytes("aaa"), rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { + @Override + protected Void rpcCall() throws Exception { + LOG.debug("Going to connect to server " + getLocation() + " for row " + + Bytes.toStringBinary(getRow())); + try (Table table = conn.getTable(getTableName())) { + boolean loaded = new SecureBulkLoadEndpointClient(table).bulkLoadHFiles(famPaths, null, + bulkToken, getLocation().getRegionInfo().getStartKey()); } - }; + return null; + } + }; RpcRetryingCallerFactory factory = new RpcRetryingCallerFactory(conf); RpcRetryingCaller caller = factory. 
newCaller(); caller.callWithRetries(callable, Integer.MAX_VALUE); @@ -137,13 +135,11 @@ protected Void rpcCall() throws Exception { rpcControllerFactory.newController(), HConstants.PRIORITY_UNSET) { @Override protected Void rpcCall() throws Exception { - LOG.debug("compacting " + getLocation() + " for row " - + Bytes.toStringBinary(getRow())); + LOG.debug("compacting " + getLocation() + " for row " + Bytes.toStringBinary(getRow())); AdminProtos.AdminService.BlockingInterface server = - conn.getAdmin(getLocation().getServerName()); - CompactRegionRequest request = - RequestConverter.buildCompactRegionRequest( - getLocation().getRegionInfo().getRegionName(), true, null); + conn.getAdmin(getLocation().getServerName()); + CompactRegionRequest request = RequestConverter.buildCompactRegionRequest( + getLocation().getRegionInfo().getRegionName(), true, null); server.compactRegion(null, request); numCompactions.incrementAndGet(); return null; @@ -155,7 +151,7 @@ protected Void rpcCall() throws Exception { } void runAtomicBulkloadTest(TableName tableName, int millisToRun, int numScanners) - throws Exception { + throws Exception { setupTable(tableName, 10); TestContext ctx = new TestContext(UTIL.getConfiguration()); diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java index bc368e3c5a97..523e6fa2e090 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/regionserver/TestServerCustomProtocol.java @@ -66,7 +66,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({RegionServerTests.class, MediumTests.class}) +@Category({ RegionServerTests.class, MediumTests.class }) public class TestServerCustomProtocol { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -108,8 +108,8 @@ public void count(RpcController controller, CountRequest request, } @Override - public void increment(RpcController controller, - IncrementCountRequest request, RpcCallback done) { + public void increment(RpcController controller, IncrementCountRequest request, + RpcCallback done) { this.counter += request.getDiff(); done.run(IncrementCountResponse.newBuilder().setCount(this.counter).build()); } @@ -188,10 +188,10 @@ public static void tearDownAfterClass() throws Exception { @Test public void testSingleProxy() throws Throwable { Table table = util.getConnection().getTable(TEST_TABLE); - Map results = ping(table, null, null); + Map results = ping(table, null, null); // There are three regions so should get back three results. 
assertEquals(3, results.size()); - for (Map.Entry e: results.entrySet()) { + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", "pong", e.getValue()); } hello(table, "George", HELLO + "George"); @@ -200,125 +200,119 @@ public void testSingleProxy() throws Throwable { LOG.info("Who are you"); hello(table, NOBODY, null); LOG.info(NOBODY); - Map intResults = table.coprocessorService(PingProtos.PingService.class, - null, null, - new Batch.Call() { + Map intResults = table.coprocessorService(PingProtos.PingService.class, null, + null, new Batch.Call() { @Override public Integer call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.count(null, PingProtos.CountRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getCount(); } }); int count = -1; - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertTrue(e.getValue() > 0); count = e.getValue(); } final int diff = 5; - intResults = table.coprocessorService(PingProtos.PingService.class, - null, null, + intResults = table.coprocessorService(PingProtos.PingService.class, null, null, new Batch.Call() { @Override public Integer call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.increment(null, - PingProtos.IncrementCountRequest.newBuilder().setDiff(diff).build(), - rpcCallback); + PingProtos.IncrementCountRequest.newBuilder().setDiff(diff).build(), rpcCallback); return rpcCallback.get().getCount(); } }); // There are three regions so should get back three results. 
assertEquals(3, results.size()); - for (Map.Entry e: intResults.entrySet()) { + for (Map.Entry e : intResults.entrySet()) { assertEquals(e.getValue().intValue(), count + diff); } table.close(); } - private Map hello(final Table table, final String send, final String response) - throws ServiceException, Throwable { - Map results = hello(table, send); - for (Map.Entry e: results.entrySet()) { + private Map hello(final Table table, final String send, final String response) + throws ServiceException, Throwable { + Map results = hello(table, send); + for (Map.Entry e : results.entrySet()) { assertEquals("Invalid custom protocol response", response, e.getValue()); } return results; } - private Map hello(final Table table, final String send) - throws ServiceException, Throwable { + private Map hello(final Table table, final String send) + throws ServiceException, Throwable { return hello(table, send, null, null); } - private Map hello(final Table table, final String send, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingProtos.PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingProtos.PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + private Map hello(final Table table, final String send, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingProtos.PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingProtos.PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); - if (send != null) { - builder.setName(send); - } - instance.hello(null, builder.build(), rpcCallback); - PingProtos.HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? r.getResponse(): null; + PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); + if (send != null) { + builder.setName(send); } - }); + instance.hello(null, builder.build(), rpcCallback); + PingProtos.HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); } - private Map compoundOfHelloAndPing(final Table table, final byte [] start, - final byte [] end) throws ServiceException, Throwable { - return table.coprocessorService(PingProtos.PingService.class, - start, end, - new Batch.Call() { - @Override - public String call(PingProtos.PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + private Map compoundOfHelloAndPing(final Table table, final byte[] start, + final byte[] end) throws ServiceException, Throwable { + return table.coprocessorService(PingProtos.PingService.class, start, end, + new Batch.Call() { + @Override + public String call(PingProtos.PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); - // Call ping on same instance. Use result calling hello on same instance. - builder.setName(doPing(instance)); - instance.hello(null, builder.build(), rpcCallback); - PingProtos.HelloResponse r = rpcCallback.get(); - return r != null && r.hasResponse()? 
r.getResponse(): null; - } - }); + PingProtos.HelloRequest.Builder builder = PingProtos.HelloRequest.newBuilder(); + // Call ping on same instance. Use result calling hello on same instance. + builder.setName(doPing(instance)); + instance.hello(null, builder.build(), rpcCallback); + PingProtos.HelloResponse r = rpcCallback.get(); + return r != null && r.hasResponse() ? r.getResponse() : null; + } + }); } - private Map noop(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map noop(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingProtos.PingService.class, start, end, - new Batch.Call() { - @Override - public String call(PingProtos.PingService instance) throws IOException { - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new Batch.Call() { + @Override + public String call(PingProtos.PingService instance) throws IOException { + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = new CoprocessorRpcUtils.BlockingRpcCallback<>(); - PingProtos.NoopRequest.Builder builder = PingProtos.NoopRequest.newBuilder(); - instance.noop(null, builder.build(), rpcCallback); - rpcCallback.get(); - // Looks like null is expected when void. That is what the test below is looking for - return null; - } - }); + PingProtos.NoopRequest.Builder builder = PingProtos.NoopRequest.newBuilder(); + instance.noop(null, builder.build(), rpcCallback); + rpcCallback.get(); + // Looks like null is expected when void. That is what the test below is looking for + return null; + } + }); } @Test public void testSingleMethod() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = table.coprocessorService(PingProtos.PingService.class, - null, ROW_A, - new Batch.Call() { + Map results = table.coprocessorService(PingProtos.PingService.class, null, + ROW_A, new Batch.Call() { @Override public String call(PingProtos.PingService instance) throws IOException { CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + new CoprocessorRpcUtils.BlockingRpcCallback<>(); instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback); return rpcCallback.get().getPong(); } @@ -341,9 +335,9 @@ public String call(PingProtos.PingService instance) throws IOException { public void testRowRange() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - for (HRegionLocation e: locator.getAllRegionLocations()) { - LOG.info("Region " + e.getRegionInfo().getRegionNameAsString() - + ", servername=" + e.getServerName()); + for (HRegionLocation e : locator.getAllRegionLocations()) { + LOG.info("Region " + e.getRegionInfo().getRegionNameAsString() + ", servername=" + + e.getServerName()); } // Here are what regions looked like on a run: // @@ -351,7 +345,7 @@ public void testRowRange() throws Throwable { // test,bbb,1355943549661.110393b070dd1ed93441e0bc9b3ffb7e. // test,ccc,1355943549665.c3d6d125141359cbbd2a43eaff3cdf74. - Map results = ping(table, null, ROW_A); + Map results = ping(table, null, ROW_A); // Should contain first region only. 
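// Illustrative sketch, not part of this patch: the row-range tests above key their
// result maps by region name and use RegionLocator to map a row back to that key.
// A minimal helper for that lookup, under the same pre-3.0 client API, might look
// like this (the surrounding test uses the equivalent calls directly).
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class RegionKeySketch {
  // Returns the region name (the byte[] key used in the coprocessor result maps)
  // of the region currently hosting the given row.
  static byte[] regionKeyForRow(Connection conn, TableName table, byte[] row) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      HRegionLocation loc = locator.getRegionLocation(row, true); // true = reload from meta
      return loc.getRegionInfo().getRegionName();
    }
  }
}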
assertEquals(1, results.size()); verifyRegionResults(locator, results, ROW_A); @@ -374,7 +368,7 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); // test explicit start + end results = ping(table, ROW_AB, ROW_BC); @@ -384,7 +378,7 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); // test single region results = ping(table, ROW_B, ROW_BC); @@ -393,15 +387,15 @@ public void testRowRange() throws Throwable { verifyRegionResults(locator, results, ROW_B); loc = locator.getRegionLocation(ROW_A, true); assertNull("Should be missing region for row aaa (prior to start)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); loc = locator.getRegionLocation(ROW_C, true); assertNull("Should be missing region for row ccc (past stop row)", - results.get(loc.getRegionInfo().getRegionName())); + results.get(loc.getRegionInfo().getRegionName())); } } - private Map ping(final Table table, final byte [] start, final byte [] end) - throws ServiceException, Throwable { + private Map ping(final Table table, final byte[] start, final byte[] end) + throws ServiceException, Throwable { return table.coprocessorService(PingProtos.PingService.class, start, end, new Batch.Call() { @Override @@ -422,7 +416,7 @@ private static String doPing(PingProtos.PingService instance) throws IOException public void testCompoundCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); + Map results = compoundOfHelloAndPing(table, ROW_A, ROW_C); verifyRegionResults(locator, results, "Hello, pong", ROW_A); verifyRegionResults(locator, results, "Hello, pong", ROW_B); verifyRegionResults(locator, results, "Hello, pong", ROW_C); @@ -433,7 +427,7 @@ public void testCompoundCall() throws Throwable { public void testNullCall() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, null, ROW_A, ROW_C); + Map results = hello(table, null, ROW_A, ROW_C); verifyRegionResults(locator, results, "Who are you?", ROW_A); verifyRegionResults(locator, results, "Who are you?", ROW_B); verifyRegionResults(locator, results, "Who are you?", ROW_C); @@ -444,7 +438,7 @@ public void testNullCall() throws Throwable { public void testNullReturn() throws Throwable { try (Table table = util.getConnection().getTable(TEST_TABLE); RegionLocator locator = util.getConnection().getRegionLocator(TEST_TABLE)) { - Map results = hello(table, "nobody", ROW_A, ROW_C); + Map results = hello(table, "nobody", ROW_A, ROW_C); verifyRegionResults(locator, results, null, ROW_A); verifyRegionResults(locator, results, null, ROW_B); verifyRegionResults(locator, results, null, ROW_C); @@ -454,7 +448,7 @@ public void testNullReturn() throws Throwable { @Test public void testEmptyReturnType() throws Throwable { try (Table 
table = util.getConnection().getTable(TEST_TABLE)) { - Map results = noop(table, ROW_A, ROW_C); + Map results = noop(table, ROW_A, ROW_C); assertEquals("Should have results from three regions", 3, results.size()); // all results should be null for (Object v : results.values()) { @@ -463,24 +457,23 @@ public void testEmptyReturnType() throws Throwable { } } - private void verifyRegionResults(RegionLocator table, Map results, byte[] row) - throws Exception { + private void verifyRegionResults(RegionLocator table, Map results, byte[] row) + throws Exception { verifyRegionResults(table, results, "pong", row); } private void verifyRegionResults(RegionLocator regionLocator, Map results, - String expected, byte[] row) throws Exception { - for (Map.Entry e: results.entrySet()) { - LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + - ", result key=" + Bytes.toString(e.getKey()) + - ", value=" + e.getValue()); + String expected, byte[] row) throws Exception { + for (Map.Entry e : results.entrySet()) { + LOG.info("row=" + Bytes.toString(row) + ", expected=" + expected + ", result key=" + + Bytes.toString(e.getKey()) + ", value=" + e.getValue()); } HRegionLocation loc = regionLocator.getRegionLocation(row, true); byte[] region = loc.getRegionInfo().getRegionName(); - assertTrue("Results should contain region " + - Bytes.toStringBinary(region) + " for row '" + Bytes.toStringBinary(row)+ "'", + assertTrue("Results should contain region " + Bytes.toStringBinary(region) + " for row '" + + Bytes.toStringBinary(row) + "'", results.containsKey(region)); - assertEquals("Invalid result for row '"+Bytes.toStringBinary(row)+"'", - expected, results.get(region)); + assertEquals("Invalid result for row '" + Bytes.toStringBinary(row) + "'", expected, + results.get(region)); } } diff --git a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java index d66ab4cb9ea2..eae7e7dac80b 100644 --- a/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java +++ b/hbase-endpoint/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationSyncUpToolWithBulkLoadedData.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -54,10 +54,10 @@ public class TestReplicationSyncUpToolWithBulkLoadedData extends TestReplication @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithBulkLoadedData.class); + HBaseClassTestRule.forClass(TestReplicationSyncUpToolWithBulkLoadedData.class); private static final Logger LOG = - LoggerFactory.getLogger(TestReplicationSyncUpToolWithBulkLoadedData.class); + LoggerFactory.getLogger(TestReplicationSyncUpToolWithBulkLoadedData.class); @Override protected void customizeClusterConf(Configuration conf) { @@ -163,9 +163,9 @@ private void mimicSyncUpAfterBulkLoad(Iterator randomHFileRangeListItera LOG.info("SyncUpAfterBulkLoad succeeded at retry = " + i); break; } else { - LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i + - ", with rowCount_ht1TargetPeer1 =" + rowCountHt1TargetAtPeer1 + - " and rowCount_ht2TargetAtPeer1 =" + rowCountHt2TargetAtPeer1); + LOG.debug("SyncUpAfterBulkLoad failed at retry = " + i + ", with rowCount_ht1TargetPeer1 =" + + rowCountHt1TargetAtPeer1 + " and rowCount_ht2TargetAtPeer1 =" + + rowCountHt2TargetAtPeer1); } Thread.sleep(SLEEP_TIME); } @@ -177,25 +177,25 @@ private void loadAndReplicateHFiles(boolean verifyReplicationOnSlave, // Load 100 + 3 hfiles to t1_syncup. byte[][][] hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), - Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; loadAndValidateHFileReplication("HFileReplication_1", row, FAMILY, ht1Source, hfileRanges, 100); hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), - Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; loadAndValidateHFileReplication("HFileReplication_1", row, NO_REP_FAMILY, ht1Source, hfileRanges, 3); // Load 200 + 3 hfiles to t2_syncup. 
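// Illustrative sketch, not part of this patch: the hfileRanges value being
// re-indented here is an array of HFiles, each described by a two-element array
// of row keys bounding the keys written into that HFile (the exact boundary
// semantics are assumed; they depend on the bulk-load test helper).
import org.apache.hadoop.hbase.util.Bytes;

public class HFileRangesSketch {
  // One HFile covering rows between startRow and endRow.
  static byte[][][] singleHFileRange(String startRow, String endRow) {
    return new byte[][][] {
      new byte[][] { Bytes.toBytes(startRow), Bytes.toBytes(endRow) } };
  }
}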
hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), - Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; loadAndValidateHFileReplication("HFileReplication_1", row, FAMILY, ht2Source, hfileRanges, 200); hfileRanges = - new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), - Bytes.toBytes(randomHFileRangeListIterator.next()) } }; + new byte[][][] { new byte[][] { Bytes.toBytes(randomHFileRangeListIterator.next()), + Bytes.toBytes(randomHFileRangeListIterator.next()) } }; loadAndValidateHFileReplication("HFileReplication_1", row, NO_REP_FAMILY, ht2Source, hfileRanges, 3); diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml index b6b3dfc4dc16..d5d193c50fc7 100644 --- a/hbase-examples/pom.xml +++ b/hbase-examples/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-examples Apache HBase - Examples Examples of HBase usage - - - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - ${surefire.firstPartGroups} - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - org.apache.hbase.thirdparty @@ -211,8 +162,8 @@ org.apache.hbase hbase-http - test test-jar + test org.slf4j @@ -245,6 +196,55 @@ test + + + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + ${surefire.firstPartGroups} + + + + + org.apache.maven.plugins + maven-source-plugin + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + @@ -289,10 +289,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-external-blockcache Apache HBase - External Block Cache - - HBase module that provides out of process block cache. + HBase module that provides out of process block cache. Currently Memcached is the reference implementation for external block cache. External block caches allow HBase to take advantage of other more complex caches that can live longer than the HBase regionserver process and are not necessarily tied to a single computer - life time. However external block caches add in extra operational overhead. - - + life time. However external block caches add in extra operational overhead. 
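// Illustrative sketch, not part of this patch: wiring up the memcached-backed
// external block cache that the hbase-external-blockcache module described above
// provides. The property names below are assumptions; verify them against the
// HBase reference guide / CacheConfig before relying on them.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ExternalBlockCacheConfigSketch {
  static Configuration externalBlockCacheConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key: ask the region server to use an external (out-of-process) L2 block cache.
    conf.setBoolean("hbase.blockcache.use.external", true);
    // Assumed key: memcached endpoints consumed by MemcachedBlockCache.
    conf.set("hbase.cache.memcached.servers", "cache-host1:11211,cache-host2:11211");
    return conf;
  }
}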
+ + + + org.apache.hbase + hbase-common + + + org.apache.hbase + hbase-server + + + net.spy + spymemcached + true + + + org.slf4j + slf4j-api + + + junit + junit + test + + + @@ -60,10 +81,10 @@ versionInfo-source - generate-sources add-source + generate-sources ${project.build.directory}/generated-sources/java @@ -91,31 +112,6 @@ - - - org.apache.hbase - hbase-common - - - org.apache.hbase - hbase-server - - - net.spy - spymemcached - true - - - org.slf4j - slf4j-api - - - junit - junit - test - - - @@ -128,10 +124,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -170,7 +166,8 @@ - !hadoop.profile + + !hadoop.profile @@ -192,10 +189,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-hadoop-compat Apache HBase - Hadoop Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - + Interfaces to be implemented in order to smooth + over hadoop version differences @@ -125,8 +100,29 @@ test + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + - + skipHadoopCompatTests @@ -149,15 +145,14 @@ - org.eclipse.m2e lifecycle-mapping - - + diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java index d29e7bc1d3b3..220faa5b9753 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilityFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.Iterator; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,7 +36,8 @@ public class CompatibilityFactory { /** * This is a static only class don't let any instance be created. 
*/ - protected CompatibilityFactory() {} + protected CompatibilityFactory() { + } public static synchronized T getInstance(Class klass) { T instance = null; @@ -48,10 +47,9 @@ public static synchronized T getInstance(Class klass) { instance = it.next(); if (it.hasNext()) { StringBuilder msg = new StringBuilder(); - msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) - .append(", other implementations: {"); + msg.append("ServiceLoader provided more than one implementation for class: ").append(klass) + .append(", using implementation: ").append(instance.getClass()) + .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java index 0e633b8b15f4..bacdc11c300e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/CompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,39 +15,39 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.util.HashMap; import java.util.Iterator; import java.util.Map; import java.util.ServiceLoader; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be - * created. + * Factory for classes supplied by hadoop compatibility modules. Only one of each class will be + * created. */ @InterfaceAudience.Private public class CompatibilitySingletonFactory extends CompatibilityFactory { public static enum SingletonStorage { INSTANCE; + private final Object lock = new Object(); private final Map instances = new HashMap<>(); } + private static final Logger LOG = LoggerFactory.getLogger(CompatibilitySingletonFactory.class); /** * This is a static only class don't let anyone create an instance. 
*/ - protected CompatibilitySingletonFactory() { } + protected CompatibilitySingletonFactory() { + } /** * Get the singleton instance of Any classes defined by compatibiliy jar's - * * @return the singleton */ @SuppressWarnings("unchecked") @@ -62,8 +62,7 @@ public static T getInstance(Class klass) { if (it.hasNext()) { StringBuilder msg = new StringBuilder(); msg.append("ServiceLoader provided more than one implementation for class: ") - .append(klass) - .append(", using implementation: ").append(instance.getClass()) + .append(klass).append(", using implementation: ").append(instance.getClass()) .append(", other implementations: {"); while (it.hasNext()) { msg.append(it.next()).append(" "); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java index c0a8519c10cd..b9258965eb42 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -44,24 +43,22 @@ public interface MetricsIOSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String FS_READ_TIME_HISTO_KEY = "fsReadTime"; String FS_PREAD_TIME_HISTO_KEY = "fsPReadTime"; String FS_WRITE_HISTO_KEY = "fsWriteTime"; String CHECKSUM_FAILURES_KEY = "fsChecksumFailureCount"; - String FS_READ_TIME_HISTO_DESC - = "Latency of HFile's sequential reads on this region server in milliseconds"; - String FS_PREAD_TIME_HISTO_DESC - = "Latency of HFile's positional reads on this region server in milliseconds"; - String FS_WRITE_TIME_HISTO_DESC - = "Latency of HFile's writes on this region server in milliseconds"; + String FS_READ_TIME_HISTO_DESC = + "Latency of HFile's sequential reads on this region server in milliseconds"; + String FS_PREAD_TIME_HISTO_DESC = + "Latency of HFile's positional reads on this region server in milliseconds"; + String FS_WRITE_TIME_HISTO_DESC = + "Latency of HFile's writes on this region server in milliseconds"; String CHECKSUM_FAILURES_DESC = "Number of checksum failures for the HBase HFile checksums at the" + " HBase level (separate from HDFS checksums)"; - /** * Update the fs sequential read time histogram * @param t time it took, in milliseconds diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java index 3ba8cd5d0ae8..e3dc724d8b7d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java index 69bd040e7f95..2db18d56e0c2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; @@ -25,20 +23,15 @@ @InterfaceAudience.Private public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String AUTHORIZATION_SUCCESSES_NAME = "authorizationSuccesses"; - String AUTHORIZATION_SUCCESSES_DESC = - "Number of authorization successes."; + String AUTHORIZATION_SUCCESSES_DESC = "Number of authorization successes."; String AUTHORIZATION_FAILURES_NAME = "authorizationFailures"; - String AUTHORIZATION_FAILURES_DESC = - "Number of authorization failures."; + String AUTHORIZATION_FAILURES_DESC = "Number of authorization failures."; String AUTHENTICATION_SUCCESSES_NAME = "authenticationSuccesses"; - String AUTHENTICATION_SUCCESSES_DESC = - "Number of authentication successes."; + String AUTHENTICATION_SUCCESSES_DESC = "Number of authentication successes."; String AUTHENTICATION_FAILURES_NAME = "authenticationFailures"; - String AUTHENTICATION_FAILURES_DESC = - "Number of authentication failures."; + String AUTHENTICATION_FAILURES_DESC = "Number of authentication failures."; String AUTHENTICATION_FALLBACKS_NAME = "authenticationFallbacks"; - String AUTHENTICATION_FALLBACKS_DESC = - "Number of fallbacks to insecure authentication."; + String AUTHENTICATION_FALLBACKS_DESC = "Number of fallbacks to insecure authentication."; String SENT_BYTES_NAME = "sentBytes"; String SENT_BYTES_DESC = "Number of bytes sent."; String RECEIVED_BYTES_NAME = "receivedBytes"; @@ -54,27 +47,26 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String TOTAL_CALL_TIME_NAME = "totalCallTime"; String TOTAL_CALL_TIME_DESC = "Total call time, including both queued and processing time."; String QUEUE_SIZE_NAME = "queueSize"; - String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + - "parsed and is waiting to run or is currently being executed."; + String QUEUE_SIZE_DESC = "Number of bytes in the call queues; request has been read and " + + "parsed and is waiting to run or is currently being executed."; String GENERAL_QUEUE_NAME = "numCallsInGeneralQueue"; - String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + - "parsed requests waiting in scheduler to be executed"; + String GENERAL_QUEUE_DESC = "Number of calls in the general call queue; " + + "parsed requests waiting in scheduler to be executed"; String PRIORITY_QUEUE_NAME = "numCallsInPriorityQueue"; String METAPRIORITY_QUEUE_NAME = "numCallsInMetaPriorityQueue"; String REPLICATION_QUEUE_NAME = "numCallsInReplicationQueue"; - String REPLICATION_QUEUE_DESC = - "Number of calls in the replication call queue waiting to 
be run"; + String REPLICATION_QUEUE_DESC = "Number of calls in the replication call queue waiting to be run"; String PRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String METAPRIORITY_QUEUE_DESC = "Number of calls in the priority call queue waiting to be run"; String WRITE_QUEUE_NAME = "numCallsInWriteQueue"; - String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + - "parsed requests waiting in scheduler to be executed"; + String WRITE_QUEUE_DESC = "Number of calls in the write call queue; " + + "parsed requests waiting in scheduler to be executed"; String READ_QUEUE_NAME = "numCallsInReadQueue"; - String READ_QUEUE_DESC = "Number of calls in the read call queue; " + - "parsed requests waiting in scheduler to be executed"; + String READ_QUEUE_DESC = "Number of calls in the read call queue; " + + "parsed requests waiting in scheduler to be executed"; String SCAN_QUEUE_NAME = "numCallsInScanQueue"; - String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + - "parsed requests waiting in scheduler to be executed"; + String SCAN_QUEUE_DESC = "Number of calls in the scan call queue; " + + "parsed requests waiting in scheduler to be executed"; String NUM_OPEN_CONNECTIONS_NAME = "numOpenConnections"; String NUM_OPEN_CONNECTIONS_DESC = "Number of open connections."; String NUM_ACTIVE_HANDLER_NAME = "numActiveHandler"; @@ -92,17 +84,16 @@ public interface MetricsHBaseServerSource extends ExceptionTrackingSource { String NUM_ACTIVE_SCAN_HANDLER_NAME = "numActiveScanHandler"; String NUM_ACTIVE_SCAN_HANDLER_DESC = "Number of active scan rpc handlers."; String NUM_GENERAL_CALLS_DROPPED_NAME = "numGeneralCallsDropped"; - String NUM_GENERAL_CALLS_DROPPED_DESC = "Total number of calls in general queue which " + - "were dropped by CoDel RPC executor"; + String NUM_GENERAL_CALLS_DROPPED_DESC = + "Total number of calls in general queue which " + "were dropped by CoDel RPC executor"; String NUM_LIFO_MODE_SWITCHES_NAME = "numLifoModeSwitches"; - String NUM_LIFO_MODE_SWITCHES_DESC = "Total number of calls in general queue which " + - "were served from the tail of the queue"; + String NUM_LIFO_MODE_SWITCHES_DESC = + "Total number of calls in general queue which " + "were served from the tail of the queue"; // Direct Memory Usage metrics String NETTY_DM_USAGE_NAME = "nettyDirectMemoryUsage"; String NETTY_DM_USAGE_DESC = "Current Netty direct memory usage."; - void authorizationSuccess(); void authorizationFailure(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java index 7f1415ae86f2..027c197333a3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; @@ -34,18 +32,16 @@ public abstract class MetricsHBaseServerSourceFactory { static final String METRICS_DESCRIPTION = "Metrics about HBase Server IPC"; /** - * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. - * - * JMX_CONTEXT will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX + * The Suffix of the JMX Context that a MetricsHBaseServerSource will register under. JMX_CONTEXT + * will be created by createContextName(serverClassName) + METRICS_JMX_CONTEXT_SUFFIX */ static final String METRICS_JMX_CONTEXT_SUFFIX = ",sub=" + METRICS_NAME; abstract MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrapper wrapper); /** - * From the name of the class that's starting up create the - * context that an IPC source should register itself. - * + * From the name of the class that's starting up create the context that an IPC source should + * register itself. * @param serverName The name of the class that's starting up. * @return The Camel Cased context name. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java index db30c0348c35..136294883b69 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerWrapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java index 279454245a6a..f1692edffaf2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -56,7 +55,8 @@ public interface MetricsAssignmentManagerSource extends BaseSource { String RIT_COUNT_DESC = "Current number of Regions In Transition (Gauge)."; String RIT_COUNT_OVER_THRESHOLD_DESC = "Current number of Regions In Transition over threshold time (Gauge)."; - String RIT_OLDEST_AGE_DESC = "Timestamp in milliseconds of the oldest Region In Transition (Gauge)."; + String RIT_OLDEST_AGE_DESC = + "Timestamp in milliseconds of the oldest Region In Transition (Gauge)."; String RIT_DURATION_DESC = "Total durations in milliseconds for all Regions in Transition (Histogram)."; @@ -94,21 +94,18 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of regions in transition. - * * @param ritCount count of the regions in transition. */ void setRIT(int ritCount); /** * Set the count of the number of regions that have been in transition over the threshold time. - * * @param ritCountOverThreshold number of regions in transition for longer than threshold. */ void setRITCountOverThreshold(int ritCountOverThreshold); /** * Set the oldest region in transition. - * * @param age age of the oldest RIT. */ void setRITOldestAge(long age); @@ -121,35 +118,30 @@ public interface MetricsAssignmentManagerSource extends BaseSource { /** * Set the number of orphan regions on RS. - * * @param orphanRegionsOnRs count of the orphan regions on RS in HBCK chore report. */ void setOrphanRegionsOnRs(int orphanRegionsOnRs); /** * Set the number of orphan regions on FS. - * * @param orphanRegionsOnFs count of the orphan regions on FS in HBCK chore report. */ void setOrphanRegionsOnFs(int orphanRegionsOnFs); /** * Set the number of inconsistent regions. - * * @param inconsistentRegions count of the inconsistent regions in HBCK chore report. */ void setInconsistentRegions(int inconsistentRegions); /** * Set the number of holes. - * * @param holes count of the holes in CatalogJanitor Consistency report. */ void setHoles(int holes); /** * Set the number of overlaps. - * * @param overlaps count of the overlaps in CatalogJanitor Consistency report. */ void setOverlaps(int overlaps); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java index 91dc71a034cc..53ed8a25ed0e 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFileSystemSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -54,7 +53,6 @@ public interface MetricsMasterFileSystemSource extends BaseSource { String SPLIT_TIME_DESC = "Time it takes to finish WAL.splitLog()"; String SPLIT_SIZE_DESC = "Size of WAL files being split"; - void updateMetaWALSplitTime(long time); void updateMetaWALSplitSize(long size); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java index db4f25ec03e3..07ceaaf2e241 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java index 197f9f9fe754..a399e53b4fb3 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java index 8450432ade67..270e4e49f3f2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -57,40 +58,35 @@ public interface MetricsMasterQuotaSource extends BaseSource { /** * Updates the metric tracking the number of space quotas defined in the system. - * * @param numSpaceQuotas The number of space quotas defined */ void updateNumSpaceQuotas(long numSpaceQuotas); /** - * Updates the metric tracking the number of tables the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of tables the master has computed to be in violation of + * their space quota. * @param numTablesInViolation The number of tables violating a space quota */ void updateNumTablesInSpaceQuotaViolation(long numTablesInViolation); /** - * Updates the metric tracking the number of namespaces the master has computed to be in - * violation of their space quota. - * + * Updates the metric tracking the number of namespaces the master has computed to be in violation + * of their space quota. * @param numNamespacesInViolation The number of namespaces violating a space quota */ void updateNumNamespacesInSpaceQuotaViolation(long numNamespacesInViolation); /** - * Updates the metric tracking the number of region size reports the master is currently - * retaining in memory. - * + * Updates the metric tracking the number of region size reports the master is currently retaining + * in memory. * @param numCurrentRegionSizeReports The number of region size reports the master is holding in - * memory + * memory */ void updateNumCurrentSpaceQuotaRegionSizeReports(long numCurrentRegionSizeReports); /** - * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} - * which runs periodically. - * + * Updates the metric tracking the amount of time taken by the {@code QuotaObserverChore} which + * runs periodically. * @param time The execution time of the chore in milliseconds */ void incrementSpaceQuotaObserverChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java index 2dcd945ea811..a53652b0f3dc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactory.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java index 3bf2fddbc214..4d32cfc584bf 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -69,7 +68,8 @@ public interface MetricsMasterSource extends BaseSource { String CLUSTER_REQUESTS_NAME = "clusterRequests"; String MASTER_ACTIVE_TIME_DESC = "Master Active Time"; String MASTER_START_TIME_DESC = "Master Start Time"; - String MASTER_FINISHED_INITIALIZATION_TIME_DESC = "Timestamp when Master has finished initializing"; + String MASTER_FINISHED_INITIALIZATION_TIME_DESC = + "Timestamp when Master has finished initializing"; String AVERAGE_LOAD_DESC = "AverageLoad"; String LIVE_REGION_SERVERS_DESC = "Names of live RegionServers"; String NUMBER_OF_REGION_SERVERS_DESC = "Number of RegionServers"; @@ -88,7 +88,6 @@ public interface MetricsMasterSource extends BaseSource { /** * Increment the number of requests the cluster has seen. - * * @param inc Ammount to increment the total by. */ void incRequests(final long inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java index fce574a2cf07..bfdf348b34f7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java index fc95be8f42a3..051ad4335c28 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import java.util.Map; @@ -41,63 +40,54 @@ public interface MetricsMasterWrapper { /** * Get Average Load - * * @return Average Load */ double getAverageLoad(); /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String[] getCoprocessors(); /** * Get hbase master start time - * * @return Start time of master in milliseconds */ long getStartTime(); /** * Get the hbase master active time - * * @return Time in milliseconds when master became active */ long getActiveTime(); /** * Whether this master is the active master - * * @return True if this is the active master */ boolean getIsActiveMaster(); /** * Get the live region servers - * * @return Live region servers */ String getRegionServers(); /** * Get the number of live region servers - * * @return number of Live region servers */ @@ -105,28 +95,24 @@ public interface MetricsMasterWrapper { /** * Get the dead region servers - * * @return Dead region Servers */ String getDeadRegionServers(); /** * Get the number of dead region servers - * * @return number of Dead region Servers */ int getNumDeadRegionServers(); /** * Get the draining region servers - * * @return Draining region server */ String getDrainingRegionServers(); /** * Get the number of draining region servers - * * @return number of draining region servers */ int getNumDrainingRegionServers(); @@ -149,12 +135,12 @@ public interface MetricsMasterWrapper { /** * Gets the space usage and limit for each table. */ - Map> getTableSpaceUtilization(); + Map> getTableSpaceUtilization(); /** * Gets the space usage and limit for each namespace. */ - Map> getNamespaceSpaceUtilization(); + Map> getNamespaceSpaceUtilization(); /** * Get the time in Millis when the master finished initializing/becoming the active master diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java index 15315b6c3ef8..88e21621f100 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java index 6b8c40ba5127..502de8859ae9 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public interface MetricsBalancerSource extends BaseSource { +public interface MetricsBalancerSource extends BaseSource { /** * The name of the metrics diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java index 6eecc1233fd3..dac3d31781a7 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,16 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface extends the basic metrics balancer source to add a function - * to report metrics that related to stochastic load balancer. The purpose is to - * offer an insight to the internal cost calculations that can be useful to tune - * the balancer. For details, refer to HBASE-13965 + * This interface extends the basic metrics balancer source to add a function to report metrics that + * related to stochastic load balancer. The purpose is to offer an insight to the internal cost + * calculations that can be useful to tune the balancer. For details, refer to HBASE-13965 */ @InterfaceAudience.Private public interface MetricsStochasticBalancerSource extends MetricsBalancerSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java index 76391bb8d7b7..a2f2bb9d17f8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; /** - * BaseSource for dynamic metrics to announce to Metrics2. - * In hbase-hadoop{1|2}-compat there is an implementation of this interface. + * BaseSource for dynamic metrics to announce to Metrics2. In hbase-hadoop{1|2}-compat there is an + * implementation of this interface. */ @InterfaceAudience.Private public interface BaseSource { @@ -36,56 +35,48 @@ public interface BaseSource { /** * Set a gauge to a specific value. - * * @param gaugeName the name of the gauge - * @param value the value + * @param value the value */ void setGauge(String gaugeName, long value); /** * Add some amount to a gauge. - * * @param gaugeName the name of the gauge - * @param delta the amount to change the gauge by. + * @param delta the amount to change the gauge by. */ void incGauge(String gaugeName, long delta); /** * Subtract some amount from a gauge. - * * @param gaugeName the name of the gauge - * @param delta the amount to change the gauge by. + * @param delta the amount to change the gauge by. */ void decGauge(String gaugeName, long delta); /** * Remove a metric and no longer announce it. - * * @param key Name of the gauge to remove. */ void removeMetric(String key); /** * Add some amount to a counter. - * * @param counterName the name of the counter - * @param delta the amount to change the counter by. + * @param delta the amount to change the counter by. */ void incCounters(String counterName, long delta); /** * Add some value to a histogram. - * * @param name the name of the histogram * @param value the value to add to the histogram */ void updateHistogram(String name, long value); - /** - * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ String getMetricsContext(); @@ -96,20 +87,19 @@ public interface BaseSource { String getMetricsDescription(); /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ String getMetricsJmxContext(); /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. 
IPC, GC, WAL */ String getMetricsName(); default MetricRegistryInfo getMetricRegistryInfo() { - return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), - getMetricsContext(), getMetricsJmxContext(), true); + return new MetricRegistryInfo(getMetricsName(), getMetricsDescription(), getMetricsContext(), + getMetricsJmxContext(), true); } } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java index 3c5f898fc290..e582a57ad502 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSource.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -26,19 +25,20 @@ */ @InterfaceAudience.Private public interface ExceptionTrackingSource extends BaseSource { - String EXCEPTIONS_NAME="exceptions"; - String EXCEPTIONS_DESC="Exceptions caused by requests"; - String EXCEPTIONS_TYPE_DESC="Number of requests that resulted in the specified type of Exception"; - String EXCEPTIONS_OOO_NAME="exceptions.OutOfOrderScannerNextException"; - String EXCEPTIONS_BUSY_NAME="exceptions.RegionTooBusyException"; - String EXCEPTIONS_UNKNOWN_NAME="exceptions.UnknownScannerException"; - String EXCEPTIONS_SCANNER_RESET_NAME="exceptions.ScannerResetException"; - String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException"; - String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException"; - String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException"; + String EXCEPTIONS_NAME = "exceptions"; + String EXCEPTIONS_DESC = "Exceptions caused by requests"; + String EXCEPTIONS_TYPE_DESC = + "Number of requests that resulted in the specified type of Exception"; + String EXCEPTIONS_OOO_NAME = "exceptions.OutOfOrderScannerNextException"; + String EXCEPTIONS_BUSY_NAME = "exceptions.RegionTooBusyException"; + String EXCEPTIONS_UNKNOWN_NAME = "exceptions.UnknownScannerException"; + String EXCEPTIONS_SCANNER_RESET_NAME = "exceptions.ScannerResetException"; + String EXCEPTIONS_SANITY_NAME = "exceptions.FailedSanityCheckException"; + String EXCEPTIONS_MOVED_NAME = "exceptions.RegionMovedException"; + String EXCEPTIONS_NSRE_NAME = "exceptions.NotServingRegionException"; String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge"; - String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + - "rest of the requests will have to be retried."; + String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " + + "rest of the requests will have to be retried."; String EXCEPTIONS_CALL_QUEUE_TOO_BIG = "exceptions.callQueueTooBig"; String EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC = "Call queue is full"; String EXCEPTIONS_QUOTA_EXCEEDED = "exceptions.quotaExceeded"; @@ -54,18 +54,32 @@ public interface ExceptionTrackingSource extends BaseSource { * Different types of exceptions */ void outOfOrderException(); + void failedSanityException(); + void movedRegionException(); + void notServingRegionException(); + void unknownScannerException(); + void scannerResetException(); + void tooBusyException(); + void multiActionTooLargeException(); + void callQueueTooBigException(); + void 
quotaExceededException(); + void rpcThrottlingException(); + void callDroppedException(); + void callTimedOut(); + void requestTooBigException(); + void otherExceptions(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java index 6cb542586c98..8c89eb577614 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/JvmPauseMonitorSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -24,7 +23,7 @@ * Interface for sources that will export JvmPauseMonitor metrics */ @InterfaceAudience.Private -public interface JvmPauseMonitorSource { +public interface JvmPauseMonitorSource { String INFO_THRESHOLD_COUNT_KEY = "pauseInfoThresholdExceeded"; String INFO_THRESHOLD_COUNT_DESC = "Count of INFO level pause threshold alerts"; @@ -52,14 +51,12 @@ public interface JvmPauseMonitorSource { /** * Update the pause time histogram where GC activity was detected. - * * @param t time it took */ void updatePauseTimeWithGc(long t); /** * Update the pause time histogram where GC activity was not detected. - * * @param t time it took */ void updatePauseTimeWithoutGc(long t); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java index 575ca31c6442..d2e4baceafc8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,17 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; import org.apache.yetus.audience.InterfaceAudience; /** - * Object that will register an mbean with the underlying metrics implementation. + * Object that will register an mbean with the underlying metrics implementation. 
*/ @InterfaceAudience.Private -public interface MBeanSource { +public interface MBeanSource { /** * Register an mbean with the underlying metrics system @@ -34,7 +33,6 @@ public interface MBeanSource { * @param theMbean the actual MBean * @return ObjectName from jmx */ - ObjectName register(String serviceName, String metricsName, - Object theMbean); + ObjectName register(String serviceName, String metricsName, Object theMbean); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java index 064c9ca3f9a1..b90b6a3c674b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/metrics/OperationMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -41,9 +40,9 @@ public OperationMetrics(final MetricRegistry registry, final String metricNamePr Preconditions.checkNotNull(metricNamePrefix); /** - * TODO: As of now, Metrics description cannot be added/ registered with - * {@link MetricRegistry}. As metric names are unambiguous but concise, descriptions of - * metrics need to be made available someplace for users. + * TODO: As of now, Metrics description cannot be added/ registered with {@link MetricRegistry}. + * As metric names are unambiguous but concise, descriptions of metrics need to be made + * available someplace for users. */ submittedCounter = registry.counter(metricNamePrefix + SUBMITTED_COUNT); timeHisto = registry.histogram(metricNamePrefix + TIME); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java index 868acd84f7e9..6aa69175551d 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSource.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java index 1d9e1ac19658..733ff60df715 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsRegionAggregateSource extends BaseSource { @@ -52,14 +51,12 @@ public interface MetricsRegionAggregateSource extends BaseSource { /** * Register a MetricsRegionSource as being open. - * * @param source the source for the region being opened. */ void register(MetricsRegionSource source); /** * Remove a region's source. This is called when a region is closed. - * * @param source The region to remove. */ void deregister(MetricsRegionSource source); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java index 93990ef1bd4e..991187bc98eb 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSource.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -39,31 +40,28 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { String REGION_SIZE_REPORTING_CHORE_TIME_NAME = "regionSizeReportingChoreTime"; /** - * Updates the metric tracking how many tables this RegionServer has marked as in violation - * of their space quota. + * Updates the metric tracking how many tables this RegionServer has marked as in violation of + * their space quota. */ void updateNumTablesInSpaceQuotaViolation(long tablesInViolation); /** * Updates the metric tracking how many tables this RegionServer has received * {@code SpaceQuotaSnapshot}s for. - * * @param numSnapshots The number of {@code SpaceQuotaSnapshot}s received from the Master. */ void updateNumTableSpaceQuotaSnapshots(long numSnapshots); /** - * Updates the metric tracking how much time was spent scanning the filesystem to compute - * the size of each region hosted by this RegionServer. - * + * Updates the metric tracking how much time was spent scanning the filesystem to compute the size + * of each region hosted by this RegionServer. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaFileSystemScannerChoreTime(long time); /** - * Updates the metric tracking how much time was spent updating the RegionServer with the - * latest information on space quotas from the {@code hbase:quota} table. - * + * Updates the metric tracking how much time was spent updating the RegionServer with the latest + * information on space quotas from the {@code hbase:quota} table. * @param time The execution time of the chore in milliseconds. */ void incrementSpaceQuotaRefresherChoreTime(long time); @@ -71,7 +69,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how many region size reports were sent from this RegionServer to * the Master. These reports contain information on the size of each Region hosted locally. - * * @param numReportsSent The number of region size reports sent */ void incrementNumRegionSizeReportsSent(long numReportsSent); @@ -79,7 +76,6 @@ public interface MetricsRegionServerQuotaSource extends BaseSource { /** * Updates the metric tracking how much time was spent sending region size reports to the Master * by the RegionSizeReportingChore. - * * @param time The execution time in milliseconds. */ void incrementRegionSizeReportingChoreTime(long time); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java index 834a33873069..0e666d8046d4 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -50,7 +49,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Put time histogram - * * @param t time it took */ void updatePut(long t); @@ -63,7 +61,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Delete time histogram - * * @param t time it took */ void updateDelete(long t); @@ -94,42 +91,37 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo /** * Update the Get time histogram . - * * @param t time it took */ void updateGet(long t); /** * Update the Increment time histogram. - * * @param t time it took */ void updateIncrement(long t); /** * Update the Append time histogram. - * * @param t time it took */ void updateAppend(long t); /** * Update the Replay time histogram. - * * @param t time it took */ void updateReplay(long t); /** * Update the scan size. - * * @param scanSize size of the scan */ void updateScanSize(long scanSize); /** * Update the scan time. - * */ + */ void updateScanTime(long t); /** @@ -256,14 +248,14 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String STOREFILE_SIZE_DESC = "Size of storefiles being served."; String TOTAL_REQUEST_COUNT = "totalRequestCount"; String TOTAL_REQUEST_COUNT_DESC = - "Total number of requests this RegionServer has answered; increments the count once for " + - "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + - "of a non-existent row"; + "Total number of requests this RegionServer has answered; increments the count once for " + + "EVERY access whether an admin operation, a Scan, a Put or Put of 1M rows, or a Get " + + "of a non-existent row"; String TOTAL_ROW_ACTION_REQUEST_COUNT = "totalRowActionRequestCount"; String TOTAL_ROW_ACTION_REQUEST_COUNT_DESC = - "Total number of region requests this RegionServer has answered; counts by row-level " + - "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + - "once per access whether a Put of 1M rows or a Get that returns 1M Results"; + "Total number of region requests this RegionServer has answered; counts by row-level " + + "action at the RPC Server (Sums 'readRequestsCount' and 'writeRequestsCount'); counts" + + "once per access whether a Put of 1M rows or a Get that returns 1M Results"; String READ_REQUEST_COUNT = "readRequestCount"; String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount"; String FILTERED_READ_REQUEST_COUNT_DESC = @@ -274,8 +266,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String READ_REQUEST_RATE_DESC = "Rate of answering the read requests by this region server per second."; String WRITE_REQUEST_COUNT = "writeRequestCount"; - String WRITE_REQUEST_COUNT_DESC = - "Number of mutation requests this RegionServer has answered."; + String WRITE_REQUEST_COUNT_DESC = "Number of mutation requests this RegionServer has answered."; String WRITE_REQUEST_RATE_PER_SECOND = "writeRequestRatePerSecond"; String WRITE_REQUEST_RATE_DESC = "Rate of answering the mutation requests by this region server per second."; @@ -290,8 +281,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String STATIC_INDEX_SIZE = "staticIndexSize"; String STATIC_INDEX_SIZE_DESC = "Uncompressed size of the static indexes."; String STATIC_BLOOM_SIZE = "staticBloomSize"; - String 
STATIC_BLOOM_SIZE_DESC = - "Uncompressed size of the static bloom filters."; + String STATIC_BLOOM_SIZE_DESC = "Uncompressed size of the static bloom filters."; String NUMBER_OF_MUTATIONS_WITHOUT_WAL = "mutationsWithoutWALCount"; String NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC = "Number of mutations that have been sent by clients with the write ahead logging turned off."; @@ -303,7 +293,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo "The percent of HFiles that are stored on the local hdfs data node."; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS = "percentFilesLocalSecondaryRegions"; String PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC = - "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; + "The percent of HFiles used by secondary regions that are stored on the local hdfs data node."; String SPLIT_QUEUE_LENGTH = "splitQueueLength"; String SPLIT_QUEUE_LENGTH_DESC = "Length of the queue for splits."; String COMPACTION_QUEUE_LENGTH = "compactionQueueLength"; @@ -317,8 +307,7 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String FLUSH_QUEUE_LENGTH = "flushQueueLength"; String FLUSH_QUEUE_LENGTH_DESC = "Length of the queue for region flushes"; String BLOCK_CACHE_FREE_SIZE = "blockCacheFreeSize"; - String BLOCK_CACHE_FREE_DESC = - "Size of the block cache that is not occupied."; + String BLOCK_CACHE_FREE_DESC = "Size of the block cache that is not occupied."; String BLOCK_CACHE_COUNT = "blockCacheCount"; String BLOCK_CACHE_COUNT_DESC = "Number of block in the block cache."; String BLOCK_CACHE_SIZE = "blockCacheSize"; @@ -336,19 +325,18 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String BLOCK_CACHE_EVICTION_COUNT = "blockCacheEvictionCount"; String BLOCK_CACHE_EVICTION_COUNT_DESC = "Count of the number of blocks evicted from the block cache." - + "(Not including blocks evicted because of HFile removal)"; + + "(Not including blocks evicted because of HFile removal)"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT = "blockCacheEvictionCountPrimary"; String BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC = "Count of the number of blocks evicted from primary replica in the block cache."; String BLOCK_CACHE_HIT_PERCENT = "blockCacheCountHitPercent"; - String BLOCK_CACHE_HIT_PERCENT_DESC = - "Percent of block cache requests that are hits"; + String BLOCK_CACHE_HIT_PERCENT_DESC = "Percent of block cache requests that are hits"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT = "blockCacheExpressHitPercent"; String BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC = "The percent of the time that requests with the cache turned on hit the cache."; String BLOCK_CACHE_FAILED_INSERTION_COUNT = "blockCacheFailedInsertionCount"; - String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = "Number of times that a block cache " + - "insertion failed. Usually due to size restrictions."; + String BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC = + "Number of times that a block cache " + "insertion failed. 
Usually due to size restrictions."; String BLOCK_CACHE_DATA_MISS_COUNT = "blockCacheDataMissCount"; String BLOCK_CACHE_ENCODED_DATA_MISS_COUNT = "blockCacheEncodedDataMissCount"; String BLOCK_CACHE_LEAF_INDEX_MISS_COUNT = "blockCacheLeafIndexMissCount"; @@ -419,15 +407,12 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String SLOW_DELETE_KEY = "slowDeleteCount"; String SLOW_INCREMENT_KEY = "slowIncrementCount"; String SLOW_APPEND_KEY = "slowAppendCount"; - String SLOW_PUT_DESC = - "The number of batches containing puts that took over 1000ms to complete"; + String SLOW_PUT_DESC = "The number of batches containing puts that took over 1000ms to complete"; String SLOW_DELETE_DESC = "The number of batches containing delete(s) that took over 1000ms to complete"; String SLOW_GET_DESC = "The number of Gets that took over 1000ms to complete"; - String SLOW_INCREMENT_DESC = - "The number of Increments that took over 1000ms to complete"; - String SLOW_APPEND_DESC = - "The number of Appends that took over 1000ms to complete"; + String SLOW_INCREMENT_DESC = "The number of Increments that took over 1000ms to complete"; + String SLOW_APPEND_DESC = "The number of Appends that took over 1000ms to complete"; String FLUSHED_CELLS = "flushedCellsCount"; String FLUSHED_CELLS_DESC = "The number of cells flushed to disk"; @@ -439,14 +424,12 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String COMPACTED_CELLS_SIZE_DESC = "The total amount of data processed during minor compactions, in bytes"; String MAJOR_COMPACTED_CELLS = "majorCompactedCellsCount"; - String MAJOR_COMPACTED_CELLS_DESC = - "The number of cells processed during major compactions"; + String MAJOR_COMPACTED_CELLS_DESC = "The number of cells processed during major compactions"; String MAJOR_COMPACTED_CELLS_SIZE = "majorCompactedCellsSize"; String MAJOR_COMPACTED_CELLS_SIZE_DESC = "The total amount of data processed during major compactions, in bytes"; String CELLS_COUNT_COMPACTED_TO_MOB = "cellsCountCompactedToMob"; - String CELLS_COUNT_COMPACTED_TO_MOB_DESC = - "The number of cells moved to mob during compaction"; + String CELLS_COUNT_COMPACTED_TO_MOB_DESC = "The number of cells moved to mob during compaction"; String CELLS_COUNT_COMPACTED_FROM_MOB = "cellsCountCompactedFromMob"; String CELLS_COUNT_COMPACTED_FROM_MOB_DESC = "The number of cells moved from mob during compaction"; @@ -484,18 +467,16 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo "The number of times we started a hedged read and a hedged read won"; String HEDGED_READ_IN_CUR_THREAD = "hedgedReadOpsInCurThread"; String HEDGED_READ_IN_CUR_THREAD_DESC = - "The number of times we execute a hedged read in current thread as a fallback for task rejection"; + "The number of times we execute a hedged read in current thread as a fallback for task rejection"; String TOTAL_BYTES_READ = "totalBytesRead"; String TOTAL_BYTES_READ_DESC = "The total number of bytes read from HDFS"; String LOCAL_BYTES_READ = "localBytesRead"; - String LOCAL_BYTES_READ_DESC = - "The number of bytes read from the local HDFS DataNode"; + String LOCAL_BYTES_READ_DESC = "The number of bytes read from the local HDFS DataNode"; String SHORTCIRCUIT_BYTES_READ = "shortCircuitBytesRead"; String SHORTCIRCUIT_BYTES_READ_DESC = "The number of bytes read through HDFS short circuit read"; String ZEROCOPY_BYTES_READ = "zeroCopyBytesRead"; - String ZEROCOPY_BYTES_READ_DESC = - "The number of bytes read through HDFS zero copy"; + 
String ZEROCOPY_BYTES_READ_DESC = "The number of bytes read through HDFS zero copy"; String BLOCKED_REQUESTS_COUNT = "blockedRequestCount"; String BLOCKED_REQUESTS_COUNT_DESC = "The number of blocked requests because of memstore size is " @@ -519,48 +500,47 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String FLUSHED_MEMSTORE_BYTES_DESC = "Total number of bytes of cells in memstore from flush"; String COMPACTION_TIME = "compactionTime"; - String COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, both major and minor"; + String COMPACTION_TIME_DESC = + "Histogram for the time in millis for compaction, both major and minor"; String COMPACTION_INPUT_FILE_COUNT = "compactionInputFileCount"; - String COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, both major and minor"; + String COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, both major and minor"; String COMPACTION_INPUT_SIZE = "compactionInputSize"; - String COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, both major and minor"; + String COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, both major and minor"; String COMPACTION_OUTPUT_FILE_COUNT = "compactionOutputFileCount"; - String COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, both major and minor"; + String COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, both major and minor"; String COMPACTION_OUTPUT_SIZE = "compactionOutputSize"; - String COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, both major and minor"; + String COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, both major and minor"; String COMPACTED_INPUT_BYTES = "compactedInputBytes"; - String COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, both major and minor"; + String COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, both major and minor"; String COMPACTED_OUTPUT_BYTES = "compactedOutputBytes"; - String COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, both major and minor"; + String COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, both major and minor"; String MAJOR_COMPACTION_TIME = "majorCompactionTime"; - String MAJOR_COMPACTION_TIME_DESC - = "Histogram for the time in millis for compaction, major only"; + String MAJOR_COMPACTION_TIME_DESC = "Histogram for the time in millis for compaction, major only"; String MAJOR_COMPACTION_INPUT_FILE_COUNT = "majorCompactionInputFileCount"; - String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC - = "Histogram for the compaction input number of files, major only"; + String MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC = + "Histogram for the compaction input number of files, major only"; String MAJOR_COMPACTION_INPUT_SIZE = "majorCompactionInputSize"; - String MAJOR_COMPACTION_INPUT_SIZE_DESC - = "Histogram for the compaction total input file sizes, major only"; + String MAJOR_COMPACTION_INPUT_SIZE_DESC = + "Histogram for the compaction total input file sizes, major only"; String MAJOR_COMPACTION_OUTPUT_FILE_COUNT = "majorCompactionOutputFileCount"; - String MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC - = "Histogram for the compaction output number of files, major only"; + String 
MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC = + "Histogram for the compaction output number of files, major only"; String MAJOR_COMPACTION_OUTPUT_SIZE = "majorCompactionOutputSize"; - String MAJOR_COMPACTION_OUTPUT_SIZE_DESC - = "Histogram for the compaction total output file sizes, major only"; + String MAJOR_COMPACTION_OUTPUT_SIZE_DESC = + "Histogram for the compaction total output file sizes, major only"; String MAJOR_COMPACTED_INPUT_BYTES = "majorCompactedInputBytes"; - String MAJOR_COMPACTED_INPUT_BYTES_DESC - = "Total number of bytes that is read for compaction, major only"; + String MAJOR_COMPACTED_INPUT_BYTES_DESC = + "Total number of bytes that is read for compaction, major only"; String MAJOR_COMPACTED_OUTPUT_BYTES = "majorCompactedOutputBytes"; - String MAJOR_COMPACTED_OUTPUT_BYTES_DESC - = "Total number of bytes that is output from compaction, major only"; + String MAJOR_COMPACTED_OUTPUT_BYTES_DESC = + "Total number of bytes that is output from compaction, major only"; String RPC_GET_REQUEST_COUNT = "rpcGetRequestCount"; String RPC_GET_REQUEST_COUNT_DESC = "Number of rpc get requests this RegionServer has answered."; @@ -599,5 +579,6 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo String ACTIVE_SCANNERS_DESC = "Gauge of currently active scanners"; String SCANNER_LEASE_EXPIRED_COUNT = "scannerLeaseExpiredCount"; - String SCANNER_LEASE_EXPIRED_COUNT_DESC = "Count of scanners which were expired due to scanner lease timeout"; + String SCANNER_LEASE_EXPIRED_COUNT_DESC = + "Count of scanners which were expired due to scanner lease timeout"; } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java index ef33909839ce..d477b64609ff 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.io.MetricsIOSource; @@ -30,7 +29,6 @@ public interface MetricsRegionServerSourceFactory { /** * Given a wrapper create a MetricsRegionServerSource. - * * @param regionServerWrapper The wrapped region server * @return a Metrics Source. */ @@ -38,7 +36,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsRegionSource from a MetricsRegionWrapper. - * * @param wrapper The wrapped region * @return A metrics region source */ @@ -58,7 +55,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsTableSource from a MetricsTableWrapper. - * * @param table The table name * @param wrapper The wrapped table aggregate * @return A metrics table source @@ -67,7 +63,6 @@ public interface MetricsRegionServerSourceFactory { /** * Get a MetricsTableAggregateSource - * * @return A metrics table aggregate source */ MetricsTableAggregateSource getTableAggregate(); @@ -80,7 +75,6 @@ public interface MetricsRegionServerSourceFactory { /** * Create a MetricsIOSource from a MetricsIOWrapper. 
- * * @return A metrics IO source */ MetricsIOSource createIO(MetricsIOWrapper wrapper); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java index ab2697c34f8c..6f8f8e1f5a17 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.List; @@ -35,28 +34,24 @@ public interface MetricsRegionServerWrapper { /** * Get the Cluster ID - * * @return Cluster ID */ String getClusterId(); /** * Get the ZooKeeper Quorum Info - * * @return ZooKeeper Quorum Info */ String getZookeeperQuorum(); /** * Get the co-processors - * * @return Co-processors */ String getCoprocessors(); /** * Get HRegionServer start time - * * @return Start time of RegionServer in milliseconds */ long getStartCode(); @@ -91,9 +86,9 @@ public interface MetricsRegionServerWrapper { */ long getNumWALSlowAppend(); - /** - * Get the number of store files hosted on this region server. - */ + /** + * Get the number of store files hosted on this region server. + */ long getNumStoreFiles(); /** @@ -117,12 +112,12 @@ public interface MetricsRegionServerWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files hosted on this region server + * @return Average age of store files hosted on this region server */ long getAvgStoreFileAge(); /** - * @return Number of reference files on this region server + * @return Number of reference files on this region server */ long getNumReferenceFiles(); @@ -192,8 +187,8 @@ public interface MetricsRegionServerWrapper { long getNumMutationsWithoutWAL(); /** - * Ammount of data in the memstore but not in the WAL because mutations explicitly had their - * WAL turned off. + * Ammount of data in the memstore but not in the WAL because mutations explicitly had their WAL + * turned off. */ long getDataInMemoryWithoutWAL(); @@ -227,6 +222,7 @@ public interface MetricsRegionServerWrapper { int getFlushQueueSize(); long getMemStoreLimit(); + /** * Get the size (in bytes) of the block cache that is free. */ @@ -272,7 +268,6 @@ public interface MetricsRegionServerWrapper { */ long getBlockCachePrimaryEvictedCount(); - /** * Get the percent of all requests that hit the block cache. */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java index b3a556e3d9f2..386cb66ac866 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -45,7 +44,7 @@ public interface MetricsRegionSource extends Comparable { String MAX_COMPACTION_QUEUE_DESC = "Max number of compactions queued for this region"; String FLUSHES_QUEUED_DESC = "Number flushes requested/queued for this region"; String MAX_FLUSH_QUEUE_DESC = "Max number of flushes queued for this region"; - String NUM_BYTES_COMPACTED_DESC = + String NUM_BYTES_COMPACTED_DESC = "Sum of filesize on all files entering a finished, successful or aborted, compaction"; String NUM_FILES_COMPACTED_DESC = "Number of files that were input for finished, successful or aborted, compactions"; @@ -81,7 +80,7 @@ public interface MetricsRegionSource extends Comparable { /** * Update time used of resultScanner.next(). - * */ + */ void updateScanTime(long mills); /** @@ -99,5 +98,4 @@ public interface MetricsRegionSource extends Comparable { */ MetricsRegionAggregateSource getAggregateSource(); - } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java index 28f8832e7491..b20d1c1ec803 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,36 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap an HRegion and export numbers so they can be - * used in MetricsRegionSource + * Interface of class that will wrap an HRegion and export numbers so they can be used in + * MetricsRegionSource */ @InterfaceAudience.Private public interface MetricsRegionWrapper { /** * Get the name of the table the region belongs to. - * * @return The string version of the table name. */ String getTableName(); /** * Get the name of the namespace this table is in. - * @return String version of the namespace. Can't be empty. + * @return String version of the namespace. Can't be empty. */ String getNamespace(); /** * Get the name of the region. - * * @return The encoded name of the region. 
*/ String getRegionName(); @@ -90,12 +86,12 @@ public interface MetricsRegionWrapper { long getMinStoreFileAge(); /** - * @return Average age of store files under this region + * @return Average age of store files under this region */ long getAvgStoreFileAge(); /** - * @return Number of reference files under this region + * @return Number of reference files under this region */ long getNumReferenceFiles(); @@ -113,38 +109,38 @@ public interface MetricsRegionWrapper { long getNumCompactionsCompleted(); /** - * @return Age of the last major compaction + * @return Age of the last major compaction */ long getLastMajorCompactionAge(); /** - * Returns the total number of compactions that have been reported as failed on this region. - * Note that a given compaction can be reported as both completed and failed if an exception - * is thrown in the processing after {@code HRegion.compact()}. + * Returns the total number of compactions that have been reported as failed on this region. Note + * that a given compaction can be reported as both completed and failed if an exception is thrown + * in the processing after {@code HRegion.compact()}. */ long getNumCompactionsFailed(); /** - * @return the total number of compactions that are currently queued(or being executed) at point in - * time + * @return the total number of compactions that are currently queued(or being executed) at point + * in time */ long getNumCompactionsQueued(); /** - * @return the total number of flushes currently queued(being executed) for this region at point in - * time + * @return the total number of flushes currently queued(being executed) for this region at point + * in time */ long getNumFlushesQueued(); /** - * @return the max number of compactions queued for this region - * Note that this metric is updated periodically and hence might miss some data points + * @return the max number of compactions queued for this region Note that this metric is updated + * periodically and hence might miss some data points */ long getMaxCompactionQueueSize(); /** - * @return the max number of flushes queued for this region - * Note that this metric is updated periodically and hence might miss some data points + * @return the max number of flushes queued for this region Note that this metric is updated + * periodically and hence might miss some data points */ long getMaxFlushQueueSize(); @@ -161,8 +157,8 @@ public interface MetricsRegionWrapper { long getStoreRefCount(); /** - * @return the max number of references active on any store file among - * all compacted store files that belong to this region + * @return the max number of references active on any store file among all compacted store files + * that belong to this region */ long getMaxCompactedStoreFileRefCount(); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java index f746c98c5458..e11f1864f484 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,15 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** - * This interface will be implemented by a MetricsSource that will export metrics from - * multiple regions of a table into the hadoop metrics system. + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * regions of a table into the hadoop metrics system. */ @InterfaceAudience.Private public interface MetricsTableAggregateSource extends BaseSource { @@ -59,7 +58,6 @@ public interface MetricsTableAggregateSource extends BaseSource { /** * Remove a table's source. This is called when regions of a table are closed. - * * @param table The table name */ void deleteTableSource(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java index 2aeb82b0d64d..aab2abdc4217 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatencies.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -59,7 +60,6 @@ public interface MetricsTableLatencies { /** * Update the Put time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -67,7 +67,6 @@ public interface MetricsTableLatencies { /** * Update the batch Put time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -75,7 +74,6 @@ public interface MetricsTableLatencies { /** * Update the Delete time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -83,7 +81,6 @@ public interface MetricsTableLatencies { /** * Update the batch Delete time histogram - * * @param tableName The table the metric is for * @param t time it took */ @@ -91,7 +88,6 @@ public interface MetricsTableLatencies { /** * Update the Get time histogram . - * * @param tableName The table the metric is for * @param t time it took */ @@ -99,7 +95,6 @@ public interface MetricsTableLatencies { /** * Update the Increment time histogram. 
- * * @param tableName The table the metric is for * @param t time it took */ @@ -107,7 +102,6 @@ public interface MetricsTableLatencies { /** * Update the Append time histogram. - * * @param tableName The table the metric is for * @param t time it took */ @@ -115,7 +109,6 @@ public interface MetricsTableLatencies { /** * Update the scan size. - * * @param tableName The table the metric is for * @param scanSize size of the scan */ @@ -123,7 +116,6 @@ public interface MetricsTableLatencies { /** * Update the scan time. - * * @param tableName The table the metric is for * @param t time it took */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java index c3b819228fe4..1847d407a010 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeter.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java index 9fc606257e0c..bd7be1783834 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.io.Closeable; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java index e8eae0d77168..ad72876f02e8 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperAggregate.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Interface of class that will wrap a MetricsTableSource and export numbers so they can be - * used in MetricsTableSource + * Interface of class that will wrap a MetricsTableSource and export numbers so they can be used in + * MetricsTableSource */ @InterfaceAudience.Private public interface MetricsTableWrapperAggregate { public String HASH = "#"; + /** * Get the number of read requests that have been issued against this table */ @@ -38,6 +37,7 @@ public interface MetricsTableWrapperAggregate { * Get the number of write requests that have been issued against this table */ long getFilteredReadRequestCount(String table); + /** * Get the number of write requests that have been issued for this table */ @@ -63,7 +63,6 @@ public interface MetricsTableWrapperAggregate { */ long getTableSize(String table); - /** * Get the average region size for this table */ @@ -95,12 +94,12 @@ public interface MetricsTableWrapperAggregate { long getMinStoreFileAge(String table); /** - * @return Average age of store files for this table + * @return Average age of store files for this table */ long getAvgStoreFileAge(String table); /** - * @return Number of reference files for this table + * @return Number of reference files for this table */ long getNumReferenceFiles(String table); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java index ee570f00d999..fe5b2ab47536 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.yetus.audience.InterfaceAudience; /** -* This interface will be implemented by a MetricsSource that will export metrics from -* multiple users into the hadoop metrics system. -*/ + * This interface will be implemented by a MetricsSource that will export metrics from multiple + * users into the hadoop metrics system. + */ @InterfaceAudience.Private public interface MetricsUserAggregateSource extends BaseSource { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java index 96173669bbc3..2d75c9246ba2 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; - import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public interface MetricsUserSource extends Comparable { - //These client metrics will be reported through clusterStatus and hbtop only + // These client metrics will be reported through clusterStatus and hbtop only interface ClientMetrics { void incrementReadRequest(); @@ -66,15 +64,14 @@ interface ClientMetrics { void getMetrics(MetricsCollector metricsCollector, boolean all); /** - * Metrics collected at client level for a user(needed for reporting through clusterStatus - * and hbtop currently) + * Metrics collected at client level for a user(needed for reporting through clusterStatus and + * hbtop currently) * @return metrics per hostname */ Map getClientMetrics(); /** * Create a instance of ClientMetrics if not present otherwise return the previous one - * * @param hostName hostname of the client * @return Instance of ClientMetrics */ diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java index 4a430cdc434e..cd2e339b5452 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import org.apache.hadoop.hbase.TableName; @@ -28,7 +27,6 @@ @InterfaceAudience.Private public interface MetricsWALSource extends BaseSource { - /** * The name of the metrics */ @@ -49,7 +47,6 @@ public interface MetricsWALSource extends BaseSource { */ String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME; - String APPEND_TIME = "appendTime"; String APPEND_TIME_DESC = "Time an append to the log took."; String APPEND_COUNT = "appendCount"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java index 2498e3426a5d..ff594412fe9a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -28,9 +27,14 @@ public interface MetricsReplicationSinkSource { public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles"; void setLastAppliedOpAge(long age); + void incrAppliedBatches(long batches); + void incrAppliedOps(long batchsize); + long getLastAppliedOpAge(); + void incrAppliedHFiles(long hfileSize); + long getSinkAppliedOps(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java index 6fb5d71ef02f..a891b7732880 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java index 5e4ad27e0912..9c567d8bec25 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.yetus.audience.InterfaceAudience; @@ -23,7 +22,10 @@ @InterfaceAudience.Private public interface MetricsReplicationSourceFactory { public MetricsReplicationSinkSource getSink(); + public MetricsReplicationSourceSource getSource(String id); + public MetricsReplicationTableSource getTableSource(String tableName); + public MetricsReplicationGlobalSourceSource getGlobalSource(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java index a6cf79b710fe..66ec0c584f8f 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -56,35 +55,66 @@ public interface MetricsReplicationSourceSource extends BaseSource { public static final String SOURCE_INITIALIZING = "source.numInitializing"; void setLastShippedAge(long age); + void incrSizeOfLogQueue(int size); + void decrSizeOfLogQueue(int size); + void incrLogEditsFiltered(long size); + void incrBatchesShipped(int batches); + void incrOpsShipped(long ops); + void incrShippedBytes(long size); + void incrLogReadInBytes(long size); + void incrLogReadInEdits(long size); + void clear(); + long getLastShippedAge(); + int getSizeOfLogQueue(); + void incrHFilesShipped(long hfiles); + void incrSizeOfHFileRefsQueue(long size); + void decrSizeOfHFileRefsQueue(long size); + void incrUnknownFileLengthForClosedWAL(); + void incrUncleanlyClosedWALs(); + long getUncleanlyClosedWALs(); + void incrBytesSkippedInUncleanlyClosedWALs(final long bytes); + void incrRestartedWALReading(); + void incrRepeatedFileBytes(final long bytes); + void incrCompletedWAL(); + void incrCompletedRecoveryQueue(); + void incrFailedRecoveryQueue(); + long getWALEditsRead(); + long getShippedOps(); + long getEditsFiltered(); + void setOldestWalAge(long age); + long getOldestWalAge(); + void incrSourceInitializing(); + void decrSourceInitializing(); + int getSourceInitializing(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java index faa944a6870d..c4550abb6e83 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * 
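The replication metrics interfaces reformatted above split responsibilities between a factory and per-id sources: MetricsReplicationSourceFactory hands out the sink, a MetricsReplicationSourceSource per source id, a per-table source, and a global source. A minimal sketch of how a caller wires these together; looking the factory up through CompatibilitySingletonFactory and the "peer-1" id are assumptions for illustration, mirroring the other compat factories in this patch rather than anything this hunk shows:

// Illustrative sketch only; the factory lookup and the source id are assumed, not taken from this diff.
MetricsReplicationSourceFactory factory =
    CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class);
MetricsReplicationSourceSource source = factory.getSource("peer-1"); // hypothetical id
source.incrBatchesShipped(1);      // one batch pushed to the peer
source.incrOpsShipped(100L);       // edits contained in that batch
source.setLastShippedAge(1200L);   // age of the last shipped edit
factory.getSink().incrAppliedOps(100L); // receiving side counts what it applied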
or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -25,8 +24,12 @@ public interface MetricsReplicationTableSource extends BaseSource { void setLastShippedAge(long age); + void incrShippedBytes(long size); + long getShippedBytes(); + void clear(); + long getLastShippedAge(); } diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java index 22c8753b8e3d..0c944545c620 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -64,91 +63,78 @@ public interface MetricsRESTSource extends BaseSource, JvmPauseMonitorSource { /** * Increment the number of requests - * * @param inc Ammount to increment by */ void incrementRequests(int inc); /** * Increment the number of successful Get requests. - * * @param inc Number of successful get requests. */ void incrementSucessfulGetRequests(int inc); /** * Increment the number of successful Put requests. - * * @param inc Number of successful put requests. */ void incrementSucessfulPutRequests(int inc); /** * Increment the number of successful Delete requests. - * * @param inc */ void incrementSucessfulDeleteRequests(int inc); /** * Increment the number of failed Put Requests. - * * @param inc Number of failed Put requests. */ void incrementFailedPutRequests(int inc); /** * Increment the number of failed Get requests. - * * @param inc The number of failed Get Requests. */ void incrementFailedGetRequests(int inc); /** * Increment the number of failed Delete requests. - * * @param inc The number of failed delete requests. */ void incrementFailedDeleteRequests(int inc); /** * Increment the number of successful scan requests. - * * @param inc Number of successful scan requests. */ void incrementSucessfulScanRequests(final int inc); /** * Increment the number failed scan requests. - * * @param inc Number of failed scan requests. */ void incrementFailedScanRequests(final int inc); /** * Increment the number of successful append requests. - * * @param inc Number of successful append requests. */ void incrementSucessfulAppendRequests(final int inc); /** * Increment the number failed append requests. - * * @param inc Number of failed append requests. */ void incrementFailedAppendRequests(final int inc); /** * Increment the number of successful increment requests. - * * @param inc Number of successful increment requests. */ void incrementSucessfulIncrementRequests(final int inc); /** * Increment the number failed increment requests. - * * @param inc Number of failed increment requests. 
*/ void incrementFailedIncrementRequests(final int inc); diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java index 1e95782c2ba6..05385927672b 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSource; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java index 7918d7360d8c..1dd7d177aadc 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,12 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; -/** Factory that will be used to create metrics sources for the two diffent types of thrift servers. */ +/** + * Factory that will be used to create metrics sources for the two diffent types of thrift servers. + */ @InterfaceAudience.Private public interface MetricsThriftServerSourceFactory { diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java index 3e971243d09a..ce93143d884a 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,15 +51,18 @@ public interface MetricsZooKeeperSource extends BaseSource { String EXCEPTION_CONNECTIONLOSS = "CONNECTIONLOSS Exception"; String EXCEPTION_CONNECTIONLOSS_DESC = "Number of failed ops due to a CONNECTIONLOSS exception."; String EXCEPTION_DATAINCONSISTENCY = "DATAINCONSISTENCY Exception"; - String EXCEPTION_DATAINCONSISTENCY_DESC = "Number of failed ops due to a DATAINCONSISTENCY exception."; + String EXCEPTION_DATAINCONSISTENCY_DESC = + "Number of failed ops due to a DATAINCONSISTENCY exception."; String EXCEPTION_INVALIDACL = "INVALIDACL Exception"; String EXCEPTION_INVALIDACL_DESC = "Number of failed ops due to an INVALIDACL exception"; String EXCEPTION_NOAUTH = "NOAUTH Exception"; String EXCEPTION_NOAUTH_DESC = "Number of failed ops due to a NOAUTH exception."; String EXCEPTION_OPERATIONTIMEOUT = "OPERATIONTIMEOUT Exception"; - String EXCEPTION_OPERATIONTIMEOUT_DESC = "Number of failed ops due to an OPERATIONTIMEOUT exception."; + String EXCEPTION_OPERATIONTIMEOUT_DESC = + "Number of failed ops due to an OPERATIONTIMEOUT exception."; String EXCEPTION_RUNTIMEINCONSISTENCY = "RUNTIMEINCONSISTENCY Exception"; - String EXCEPTION_RUNTIMEINCONSISTENCY_DESC = "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; + String EXCEPTION_RUNTIMEINCONSISTENCY_DESC = + "Number of failed ops due to a RUNTIMEINCONSISTENCY exception."; String EXCEPTION_SESSIONEXPIRED = "SESSIONEXPIRED Exception"; String EXCEPTION_SESSIONEXPIRED_DESC = "Number of failed ops due to a SESSIONEXPIRED exception."; String EXCEPTION_SYSTEMERROR = "SYSTEMERROR Exception"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java index bc1e8cb242f7..a4a37c0b6b71 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,19 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import org.apache.yetus.audience.InterfaceAudience; /** - * Metrics Histogram interface. Implementing classes will expose computed - * quartile values through the metrics system. + * Metrics Histogram interface. Implementing classes will expose computed quartile values through + * the metrics system. */ @InterfaceAudience.Private public interface MetricHistogram { - //Strings used to create metrics names. + // Strings used to create metrics names. String NUM_OPS_METRIC_NAME = "_num_ops"; String MIN_METRIC_NAME = "_min"; String MAX_METRIC_NAME = "_max"; diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java index 1366fd0b9205..33b6c0d9a934 100644 --- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java +++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/metrics2/MetricsExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
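MetricHistogram above only carries name suffixes; concrete histogram implementations append them to the histogram's own name when emitting values. A small, illustrative sketch of the names a histogram registered as "appendTime" (the WAL append histogram named earlier in this patch) would publish:

// How the suffix constants combine with a histogram's base name (illustrative):
String numOps = "appendTime" + MetricHistogram.NUM_OPS_METRIC_NAME; // "appendTime_num_ops"
String min = "appendTime" + MetricHistogram.MIN_METRIC_NAME;        // "appendTime_min"
String max = "appendTime" + MetricHistogram.MAX_METRIC_NAME;        // "appendTime_max"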
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2; import java.util.concurrent.ScheduledExecutorService; diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java index 157327babb28..cb56b7faf58c 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/HadoopShims.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,23 +15,21 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; - /** * A compatibility shim layer for interacting with different versions of Hadoop. */ -//NOTE: we can move this under src/main if main code wants to use this shim layer +// NOTE: we can move this under src/main if main code wants to use this shim layer public interface HadoopShims { /** * Returns a TaskAttemptContext instance created from the given parameters. * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by - * TaskAttemptId.forName() + * TaskAttemptId.forName() * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ - T createTestTaskAttemptContext(final J job, final String taskId); + T createTestTaskAttemptContext(final J job, final String taskId); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java index f72843cc4b01..8ea3da856af0 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; public interface RandomStringGenerator { diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java index 91cd19ef009c..eb9083a43021 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/RandomStringGeneratorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,10 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase; - import java.util.UUID; import java.util.concurrent.ThreadLocalRandom; @@ -27,8 +25,8 @@ public class RandomStringGeneratorImpl implements RandomStringGenerator { private final String s; public RandomStringGeneratorImpl() { - s = new UUID(ThreadLocalRandom.current().nextLong(), - ThreadLocalRandom.current().nextLong()).toString(); + s = new UUID(ThreadLocalRandom.current().nextLong(), ThreadLocalRandom.current().nextLong()) + .toString(); } @Override diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java index 27888db0f6d2..eec399c1ca68 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/TestCompatibilitySingletonFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestCompatibilitySingletonFactory { @ClassRule @@ -47,8 +47,7 @@ private class TestCompatibilitySingletonFactoryCallable implements Callable callables = new ArrayList<>(ITERATIONS); List resultStrings = new ArrayList<>(ITERATIONS); - // Create the callables. for (int i = 0; i < ITERATIONS; i++) { callables.add(new TestCompatibilitySingletonFactoryCallable()); @@ -77,7 +75,6 @@ public void testGetInstance() throws Exception { // Get the first string. String firstString = resultStrings.get(0); - // Assert that all the strings are equal to the fist. for (String s : resultStrings) { assertEquals(firstString, s); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java index a49c8a7b8925..a359c698ebe7 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.experimental.categories.Category; /** - * Test for the CompatibilitySingletonFactory and building MetricsMasterSource + * Test for the CompatibilitySingletonFactory and building MetricsMasterSource */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsMasterSourceFactory.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. 
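The two tests touched above exercise both halves of the CompatibilitySingletonFactory contract: TestCompatibilitySingletonFactory asserts that every caller (a set of callables in that test) sees the same instance, while the factory tests assert that the lookup fails fast when no hadoop-compat implementation is on the classpath. A compressed sketch of that contract, using only types named in this patch:

// Sketch of the behavior the tests above verify (illustrative, not a new test).
RandomStringGenerator first = CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class);
RandomStringGenerator again = CompatibilitySingletonFactory.getInstance(RandomStringGenerator.class);
assert first == again; // repeated lookups return the same singleton
// With no implementation available on the classpath, the lookup throws instead:
// CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); // -> RuntimeException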
CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java index 3c2a21d1533e..79dbe6c4c238 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.experimental.categories.Category; /** - * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource + * Test for the CompatibilitySingletonFactory and building MetricsRegionServerSource */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionServerSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsRegionServerSourceFactory.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java index 2a804158e9ee..ed9f8b18b5f0 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,16 +25,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsWALSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsWALSource.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. 
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java index 19e041193eec..946ea696e1b4 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.experimental.categories.Category; /** - * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource + * Test for the CompatibilitySingletonFactory and building MetricsReplicationSource */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsReplicationSourceFactory.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); } } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java index fc4caae0b205..407a9a562055 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,18 +26,18 @@ import org.junit.experimental.categories.Category; /** - * Test of Rest Metrics Source interface. + * Test of Rest Metrics Source interface. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRESTSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsRESTSource.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. 
CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java index 49d25723b880..bcd7d9dfcefb 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import org.apache.hadoop.hbase.metrics.BaseSource; @@ -24,157 +23,141 @@ public interface MetricsAssertHelper { /** - * Init helper. This method will make sure that the metrics system is set - * up for tests. + * Init helper. This method will make sure that the metrics system is set up for tests. */ void init(); /** * Assert that a tag exists and has a given value. - * - * @param name The name of the tag. + * @param name The name of the tag. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertTag(String name, String expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGauge(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, long expected, BaseSource source); /** * Assert that a gauge exists and that it's value is equal to the expected value. - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected The expected value of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. 
*/ void assertGauge(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is greater than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be greater than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeGt(String name, double expected, BaseSource source); /** * Assert that a gauge exists and it's value is less than a given value - * - * @param name The name of the gauge + * @param name The name of the gauge * @param expected Value that the gauge is expected to be less than - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertGaugeLt(String name, double expected, BaseSource source); /** * Assert that a counter exists and that it's value is equal to the expected value. - * - * @param name The name of the counter. + * @param name The name of the counter. * @param expected The expected value - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounter(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is greater than the given value. - * - * @param name The name of the counter. + * @param name The name of the counter. * @param expected The value the counter is expected to be greater than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterGt(String name, long expected, BaseSource source); /** * Assert that a counter exists and that it's value is less than the given value. - * - * @param name The name of the counter. + * @param name The name of the counter. * @param expected The value the counter is expected to be less than. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. */ void assertCounterLt(String name, long expected, BaseSource source); /** * Get the value of a counter. - * - * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the counter. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the counter. */ long getCounter(String name, BaseSource source); /** * Check if a dynamic counter exists. - * - * @param name name of the counter. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the counter. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if counter metric exists. */ boolean checkCounterExists(String name, BaseSource source); /** * Check if a gauge exists. - * - * @param name name of the gauge. 
- * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the gauge. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return boolean true if gauge metric exists. */ boolean checkGaugeExists(String name, BaseSource source); /** * Get the value of a gauge as a double. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the gauge. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return double value of the gauge. */ double getGaugeDouble(String name, BaseSource source); /** * Get the value of a gauge as a long. - * - * @param name name of the gauge. - * @param source The BaseSource{@link BaseSource} that will provide the tags, - * gauges, and counters. + * @param name name of the gauge. + * @param source The BaseSource{@link BaseSource} that will provide the tags, gauges, and + * counters. * @return long value of the gauge. */ long getGaugeLong(String name, BaseSource source); diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java index c49a2a51ee56..360c27bcc58c 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,17 +28,16 @@ /** * Test for the interface of MetricsThriftServerSourceFactory */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsThriftServerSourceFactory { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsThriftServerSourceFactory.class); - - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws RuntimeException { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class); } diff --git a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java index ca7ba2402e65..c6fd9ab191b4 100644 --- a/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java +++ b/hbase-hadoop-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
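MetricsAssertHelper, whose javadoc is consolidated above, is the test-side way to assert on values a BaseSource exports. A short usage sketch; the metric names and the `source` variable are placeholders, and fetching the helper through CompatibilitySingletonFactory mirrors the other compat interfaces rather than anything shown in this hunk:

// Illustrative sketch; 'source' is some BaseSource under test and the metric names are placeholders.
MetricsAssertHelper helper = CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class);
helper.init(); // make sure the metrics system is set up for tests
helper.assertCounter("someCounter", 5L, source); // exact counter value
helper.assertGaugeGt("someGauge", 0L, source);   // gauge strictly greater than the bound
if (helper.checkCounterExists("maybeDynamicCounter", source)) {
  long value = helper.getCounter("maybeDynamicCounter", source);
}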
See the NOTICE file * distributed with this work for additional information @@ -25,16 +25,16 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsZooKeeperSource { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMetricsZooKeeperSource.class); - @Test(expected=RuntimeException.class) + @Test(expected = RuntimeException.class) public void testGetInstanceNoHadoopCompat() throws Exception { - //This should throw an exception because there is no compat lib on the class path. + // This should throw an exception because there is no compat lib on the class path. CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); } } diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml index db0b47fb9a5e..3d6351af7e32 100644 --- a/hbase-hadoop2-compat/pom.xml +++ b/hbase-hadoop2-compat/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-hadoop2-compat Apache HBase - Hadoop Two Compatibility - - Interfaces to be implemented in order to smooth - over hadoop version differences - - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-dependency-plugin - - - create-mrapp-generated-classpath - generate-test-resources - - build-classpath - - - - - ${project.build.directory}/test-classes/mrapp-generated-classpath - - - - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - + Interfaces to be implemented in order to smooth + over hadoop version differences @@ -200,21 +149,68 @@ limitations under the License. test + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-dependency-plugin + + + create-mrapp-generated-classpath + + build-classpath + + generate-test-resources + + + ${project.build.directory}/test-classes/mrapp-generated-classpath + + + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + - - - skipHadoopTwoCompatTests - - - skipHadoopTwoCompatTests - - - - true - true - - + + + skipHadoopTwoCompatTests + + + skipHadoopTwoCompatTests + + + + true + true + + eclipse-specific diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java index edbb9257c282..f7f3a17da0c6 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/io/MetricsIOSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.io; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -38,21 +37,18 @@ public MetricsIOSourceImpl(MetricsIOWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsIOSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsIOWrapper wrapper) { + public MetricsIOSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext, MetricsIOWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - fsReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); - fsPReadTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); - fsWriteTimeHisto = getMetricsRegistry() - .newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); + fsReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_READ_TIME_HISTO_KEY, FS_READ_TIME_HISTO_DESC); + fsPReadTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_PREAD_TIME_HISTO_KEY, FS_PREAD_TIME_HISTO_DESC); + fsWriteTimeHisto = + getMetricsRegistry().newTimeHistogram(FS_WRITE_HISTO_KEY, FS_WRITE_TIME_HISTO_DESC); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java index 67325c0728e5..118ed939b265 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,18 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import java.util.HashMap; import java.util.Locale; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsHBaseServerSourceFactoryImpl extends MetricsHBaseServerSourceFactory { private enum SourceStorage { INSTANCE; + HashMap sources = new HashMap<>(); } @@ -37,19 +35,16 @@ public MetricsHBaseServerSource create(String serverName, MetricsHBaseServerWrap } private static synchronized MetricsHBaseServerSource getSource(String serverName, - MetricsHBaseServerWrapper wrap) { + MetricsHBaseServerWrapper wrap) { String context = createContextName(serverName); MetricsHBaseServerSource source = SourceStorage.INSTANCE.sources.get(context); if (source == null) { - //Create the source. - source = new MetricsHBaseServerSourceImpl( - context, - METRICS_DESCRIPTION, - context.toLowerCase(Locale.ROOT), - context + METRICS_JMX_CONTEXT_SUFFIX, wrap); - - //Store back in storage + // Create the source. 
+ source = new MetricsHBaseServerSourceImpl(context, METRICS_DESCRIPTION, + context.toLowerCase(Locale.ROOT), context + METRICS_JMX_CONTEXT_SUFFIX, wrap); + + // Store back in storage SourceStorage.INSTANCE.sources.put(context, source); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java index e4fee95e2c4d..ede600928ea0 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/ipc/MetricsHBaseServerSourceImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -39,45 +37,40 @@ public class MetricsHBaseServerSourceImpl extends ExceptionTrackingSourceImpl private final MutableFastCounter sentBytes; private final MutableFastCounter receivedBytes; - private MetricHistogram queueCallTime; private MetricHistogram processCallTime; private MetricHistogram totalCallTime; private MetricHistogram requestSize; private MetricHistogram responseSize; - public MetricsHBaseServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsHBaseServerWrapper wrapper) { + public MetricsHBaseServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsHBaseServerWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; this.authorizationSuccesses = this.getMetricsRegistry().newCounter(AUTHORIZATION_SUCCESSES_NAME, - AUTHORIZATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_SUCCESSES_DESC, 0L); this.authorizationFailures = this.getMetricsRegistry().newCounter(AUTHORIZATION_FAILURES_NAME, - AUTHORIZATION_FAILURES_DESC, 0L); - this.authenticationSuccesses = this.getMetricsRegistry().newCounter( - AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); + AUTHORIZATION_FAILURES_DESC, 0L); + this.authenticationSuccesses = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L); this.authenticationFailures = this.getMetricsRegistry().newCounter(AUTHENTICATION_FAILURES_NAME, - AUTHENTICATION_FAILURES_DESC, 0L); - this.authenticationFallbacks = this.getMetricsRegistry().newCounter( - AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); - this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, - SENT_BYTES_DESC, 0L); - this.receivedBytes = this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, - RECEIVED_BYTES_DESC, 0L); - this.queueCallTime = this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, - QUEUE_CALL_TIME_DESC); - this.processCallTime = this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, - PROCESS_CALL_TIME_DESC); - this.totalCallTime = this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, - TOTAL_CALL_TIME_DESC); - this.requestSize = this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, - REQUEST_SIZE_DESC); - this.responseSize = 
this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, - RESPONSE_SIZE_DESC); + AUTHENTICATION_FAILURES_DESC, 0L); + this.authenticationFallbacks = this.getMetricsRegistry() + .newCounter(AUTHENTICATION_FALLBACKS_NAME, AUTHENTICATION_FALLBACKS_DESC, 0L); + this.sentBytes = this.getMetricsRegistry().newCounter(SENT_BYTES_NAME, SENT_BYTES_DESC, 0L); + this.receivedBytes = + this.getMetricsRegistry().newCounter(RECEIVED_BYTES_NAME, RECEIVED_BYTES_DESC, 0L); + this.queueCallTime = + this.getMetricsRegistry().newTimeHistogram(QUEUE_CALL_TIME_NAME, QUEUE_CALL_TIME_DESC); + this.processCallTime = + this.getMetricsRegistry().newTimeHistogram(PROCESS_CALL_TIME_NAME, PROCESS_CALL_TIME_DESC); + this.totalCallTime = + this.getMetricsRegistry().newTimeHistogram(TOTAL_CALL_TIME_NAME, TOTAL_CALL_TIME_DESC); + this.requestSize = + this.getMetricsRegistry().newSizeHistogram(REQUEST_SIZE_NAME, REQUEST_SIZE_DESC); + this.responseSize = + this.getMetricsRegistry().newSizeHistogram(RESPONSE_SIZE_NAME, RESPONSE_SIZE_DESC); } @Override @@ -147,17 +140,17 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { if (wrapper != null) { mrb.addGauge(Interns.info(QUEUE_SIZE_NAME, QUEUE_SIZE_DESC), wrapper.getTotalQueueSize()) .addGauge(Interns.info(GENERAL_QUEUE_NAME, GENERAL_QUEUE_DESC), - wrapper.getGeneralQueueLength()) - .addGauge(Interns.info(REPLICATION_QUEUE_NAME, - REPLICATION_QUEUE_DESC), wrapper.getReplicationQueueLength()) + wrapper.getGeneralQueueLength()) + .addGauge(Interns.info(REPLICATION_QUEUE_NAME, REPLICATION_QUEUE_DESC), + wrapper.getReplicationQueueLength()) .addGauge(Interns.info(PRIORITY_QUEUE_NAME, PRIORITY_QUEUE_DESC), - wrapper.getPriorityQueueLength()) + wrapper.getPriorityQueueLength()) .addGauge(Interns.info(METAPRIORITY_QUEUE_NAME, METAPRIORITY_QUEUE_DESC), - wrapper.getMetaPriorityQueueLength()) - .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, - NUM_OPEN_CONNECTIONS_DESC), wrapper.getNumOpenConnections()) - .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, - NUM_ACTIVE_HANDLER_DESC), wrapper.getActiveRpcHandlerCount()) + wrapper.getMetaPriorityQueueLength()) + .addGauge(Interns.info(NUM_OPEN_CONNECTIONS_NAME, NUM_OPEN_CONNECTIONS_DESC), + wrapper.getNumOpenConnections()) + .addGauge(Interns.info(NUM_ACTIVE_HANDLER_NAME, NUM_ACTIVE_HANDLER_DESC), + wrapper.getActiveRpcHandlerCount()) .addGauge(Interns.info(NUM_ACTIVE_GENERAL_HANDLER_NAME, NUM_ACTIVE_GENERAL_HANDLER_DESC), wrapper.getActiveGeneralRpcHandlerCount()) .addGauge( @@ -166,16 +159,13 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { .addGauge( Interns.info(NUM_ACTIVE_REPLICATION_HANDLER_NAME, NUM_ACTIVE_REPLICATION_HANDLER_DESC), wrapper.getActiveReplicationRpcHandlerCount()) - .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, - NUM_GENERAL_CALLS_DROPPED_DESC), wrapper.getNumGeneralCallsDropped()) - .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, - NUM_LIFO_MODE_SWITCHES_DESC), wrapper.getNumLifoModeSwitches()) - .addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), - wrapper.getWriteQueueLength()) - .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), - wrapper.getReadQueueLength()) - .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), - wrapper.getScanQueueLength()) + .addCounter(Interns.info(NUM_GENERAL_CALLS_DROPPED_NAME, NUM_GENERAL_CALLS_DROPPED_DESC), + wrapper.getNumGeneralCallsDropped()) + .addCounter(Interns.info(NUM_LIFO_MODE_SWITCHES_NAME, NUM_LIFO_MODE_SWITCHES_DESC), + wrapper.getNumLifoModeSwitches()) + 
.addGauge(Interns.info(WRITE_QUEUE_NAME, WRITE_QUEUE_DESC), wrapper.getWriteQueueLength()) + .addGauge(Interns.info(READ_QUEUE_NAME, READ_QUEUE_DESC), wrapper.getReadQueueLength()) + .addGauge(Interns.info(SCAN_QUEUE_NAME, SCAN_QUEUE_DESC), wrapper.getScanQueueLength()) .addGauge(Interns.info(NUM_ACTIVE_WRITE_HANDLER_NAME, NUM_ACTIVE_WRITE_HANDLER_DESC), wrapper.getActiveWriteRpcHandlerCount()) .addGauge(Interns.info(NUM_ACTIVE_READ_HANDLER_NAME, NUM_ACTIVE_READ_HANDLER_DESC), diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java index b4f62b3970b7..413ea5399214 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/mapreduce/JobUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.Cluster; @@ -43,27 +41,24 @@ protected JobUtil() { /** * Initializes the staging directory and returns the path. - * * @param conf system configuration * @return staging directory path * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ - public static Path getStagingDir(Configuration conf) - throws IOException, InterruptedException { + public static Path getStagingDir(Configuration conf) throws IOException, InterruptedException { return JobSubmissionFiles.getStagingDir(new Cluster(conf), conf); } /** * Initializes the staging directory and returns the qualified path. - * * @param conf conf system configuration * @return qualified staging directory path * @throws IOException if the ownership on the staging directory is not as expected * @throws InterruptedException if the thread getting the staging directory is interrupted */ public static Path getQualifiedStagingDir(Configuration conf) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Cluster cluster = new Cluster(conf); Path stagingDir = JobSubmissionFiles.getStagingDir(cluster, conf); return cluster.getFileSystem().makeQualified(stagingDir); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java index c78464da372e..523ffce52434 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsAssignmentManagerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -28,8 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsAssignmentManagerSourceImpl - extends BaseSourceImpl +public class MetricsAssignmentManagerSourceImpl extends BaseSourceImpl implements MetricsAssignmentManagerSource { private MutableGaugeLong ritGauge; @@ -63,16 +61,15 @@ public MetricsAssignmentManagerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsAssignmentManagerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsAssignmentManagerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } public void init() { ritGauge = metricsRegistry.newGauge(RIT_COUNT_NAME, RIT_COUNT_DESC, 0L); - ritCountOverThresholdGauge = metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, - RIT_COUNT_OVER_THRESHOLD_DESC,0L); + ritCountOverThresholdGauge = + metricsRegistry.newGauge(RIT_COUNT_OVER_THRESHOLD_NAME, RIT_COUNT_OVER_THRESHOLD_DESC, 0L); ritOldestAgeGauge = metricsRegistry.newGauge(RIT_OLDEST_AGE_NAME, RIT_OLDEST_AGE_DESC, 0L); ritDurationHisto = metricsRegistry.newTimeHistogram(RIT_DURATION_NAME, RIT_DURATION_DESC); operationCounter = metricsRegistry.getCounter(OPERATION_COUNT_NAME, 0L); @@ -94,8 +91,8 @@ public void init() { metricsRegistry.newGauge(EMPTY_REGION_INFO_REGIONS, EMPTY_REGION_INFO_REGIONS_DESC, 0L); /** - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use * {@link BaseSourceImpl#registry} to register the new metrics. */ assignMetrics = new OperationMetrics(registry, ASSIGN_METRIC_PREFIX); @@ -222,7 +219,7 @@ public OperationMetrics getCloseMetrics() { public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder metricsRecordBuilder = metricsCollector.addRecord(metricsName); metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java index d78efce2add9..b9a093c759e2 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterFilesystemSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,8 +22,7 @@ import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsMasterFilesystemSourceImpl - extends BaseSourceImpl +public class MetricsMasterFilesystemSourceImpl extends BaseSourceImpl implements MetricsMasterFileSystemSource { private MetricHistogram splitSizeHisto; @@ -36,9 +34,8 @@ public MetricsMasterFilesystemSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsMasterFilesystemSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsMasterFilesystemSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -62,7 +59,6 @@ public void updateSplitSize(long size) { splitSizeHisto.add(size); } - @Override public void updateMetaWALSplitTime(long time) { metaSplitTimeHisto.add(time); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java index 6fd254e9a690..dc5773cb9046 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java index 69e7d7958fab..c1195c8c61b1 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,29 +24,20 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. 
Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterProcSourceImpl - extends BaseSourceImpl implements MetricsMasterProcSource { +public class MetricsMasterProcSourceImpl extends BaseSourceImpl implements MetricsMasterProcSource { private final MetricsMasterWrapper masterWrapper; public MetricsMasterProcSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterProcSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterProcSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -64,13 +54,12 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // masterWrapper can be null because this function is called inside of init. if (masterWrapper != null) { - metricsRecordBuilder - .addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), - masterWrapper.getNumWALFiles()); + metricsRecordBuilder.addGauge(Interns.info(NUM_MASTER_WALS_NAME, NUM_MASTER_WALS_DESC), + masterWrapper.getNumWALFiles()); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java index 0fae0e744059..6a489eb70019 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceFactoryImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java index 750c1c959fcb..7c28e22035be 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,7 +19,6 @@ import java.util.Map; import java.util.Map.Entry; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; @@ -33,7 +33,7 @@ */ @InterfaceAudience.Private public class MetricsMasterQuotaSourceImpl extends BaseSourceImpl - implements MetricsMasterQuotaSource { + implements MetricsMasterQuotaSource { private final MetricsMasterWrapper wrapper; private final MutableGaugeLong spaceQuotasGauge; private final MutableGaugeLong tablesViolatingQuotasGauge; @@ -48,30 +48,29 @@ public MetricsMasterQuotaSourceImpl(MetricsMasterWrapper wrapper) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, wrapper); } - public MetricsMasterQuotaSourceImpl( - String metricsName, String metricsDescription, String metricsContext, - String metricsJmxContext, MetricsMasterWrapper wrapper) { + public MetricsMasterQuotaSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper wrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.wrapper = wrapper; - spaceQuotasGauge = getMetricsRegistry().newGauge( - NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); - tablesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_TABLES_QUOTA_VIOLATIONS_NAME, NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); - namespacesViolatingQuotasGauge = getMetricsRegistry().newGauge( - NUM_NS_QUOTA_VIOLATIONS_NAME, NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); - regionSpaceReportsGauge = getMetricsRegistry().newGauge( - NUM_REGION_SIZE_REPORTS_NAME, NUM_REGION_SIZE_REPORTS_DESC, 0L); + 
spaceQuotasGauge = + getMetricsRegistry().newGauge(NUM_SPACE_QUOTAS_NAME, NUM_SPACE_QUOTAS_DESC, 0L); + tablesViolatingQuotasGauge = getMetricsRegistry().newGauge(NUM_TABLES_QUOTA_VIOLATIONS_NAME, + NUM_TABLES_QUOTA_VIOLATIONS_DESC, 0L); + namespacesViolatingQuotasGauge = getMetricsRegistry().newGauge(NUM_NS_QUOTA_VIOLATIONS_NAME, + NUM_NS_QUOTA_VIOLATIONS_DESC, 0L); + regionSpaceReportsGauge = getMetricsRegistry().newGauge(NUM_REGION_SIZE_REPORTS_NAME, + NUM_REGION_SIZE_REPORTS_DESC, 0L); - quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - QUOTA_OBSERVER_CHORE_TIME_NAME, QUOTA_OBSERVER_CHORE_TIME_DESC); - snapshotObserverTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); + quotaObserverTimeHisto = getMetricsRegistry().newTimeHistogram(QUOTA_OBSERVER_CHORE_TIME_NAME, + QUOTA_OBSERVER_CHORE_TIME_DESC); + snapshotObserverTimeHisto = getMetricsRegistry() + .newTimeHistogram(SNAPSHOT_OBSERVER_CHORE_TIME_NAME, SNAPSHOT_OBSERVER_CHORE_TIME_DESC); snapshotObserverSizeComputationTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); - snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry().newTimeHistogram( - SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); + SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_NAME, SNAPSHOT_OBSERVER_SIZE_COMPUTATION_TIME_DESC); + snapshotObserverSnapshotFetchTimeHisto = getMetricsRegistry() + .newTimeHistogram(SNAPSHOT_OBSERVER_FETCH_TIME_NAME, SNAPSHOT_OBSERVER_FETCH_TIME_DESC); } @Override @@ -109,7 +108,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder record = metricsCollector.addRecord(metricsRegistry.info()); if (wrapper != null) { // Summarize the tables - Map> tableUsages = wrapper.getTableSpaceUtilization(); + Map> tableUsages = wrapper.getTableSpaceUtilization(); String tableSummary = "[]"; if (tableUsages != null && !tableUsages.isEmpty()) { tableSummary = generateJsonQuotaSummary(tableUsages.entrySet(), "table"); @@ -118,7 +117,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // Summarize the namespaces String nsSummary = "[]"; - Map> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); + Map> namespaceUsages = wrapper.getNamespaceSpaceUtilization(); if (namespaceUsages != null && !namespaceUsages.isEmpty()) { nsSummary = generateJsonQuotaSummary(namespaceUsages.entrySet(), "namespace"); } @@ -130,10 +129,10 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { /** * Summarizes the usage and limit for many targets (table or namespace) into JSON. 
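An illustrative aside, not part of this patch: getMetrics() above condenses per-table and per-namespace quota usage into a summary string via generateJsonQuotaSummary, walking data that maps a table or namespace name to a (usage, limit) pair, which is what the Entry-of-Entry signature expresses. A hedged sketch of that shape; the output format below is invented for illustration and is not the string HBase actually emits:

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Map.Entry;

public class QuotaSummarySketch {
  static String summarize(Iterable<Entry<String, Entry<Long, Long>>> data) {
    StringBuilder sb = new StringBuilder("[");
    for (Entry<String, Entry<Long, Long>> e : data) {
      if (sb.length() > 1) {
        sb.append(", ");
      }
      sb.append("{name=").append(e.getKey()).append(", usage=").append(e.getValue().getKey())
        .append(", limit=").append(e.getValue().getValue()).append('}');
    }
    return sb.append(']').toString();
  }

  public static void main(String[] args) {
    Map<String, Entry<Long, Long>> usages = new LinkedHashMap<>();
    usages.put("ns1:t1", new SimpleImmutableEntry<>(1024L, 4096L));
    System.out.println(summarize(usages.entrySet())); // [{name=ns1:t1, usage=1024, limit=4096}]
  }
}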
*/ - private String generateJsonQuotaSummary( - Iterable>> data, String target) { + private String generateJsonQuotaSummary(Iterable>> data, + String target) { StringBuilder sb = new StringBuilder(); - for (Entry> tableUsage : data) { + for (Entry> tableUsage : data) { String tableName = tableUsage.getKey(); long usage = tableUsage.getValue().getKey(); long limit = tableUsage.getValue().getValue(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java index a4b3fa194f9c..84c49062f03a 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsMasterSource when given a MetricsMasterWrapper + * Factory to create MetricsMasterSource when given a MetricsMasterWrapper */ @InterfaceAudience.Private public class MetricsMasterSourceFactoryImpl implements MetricsMasterSourceFactory { private static enum FactoryStorage { INSTANCE; + MetricsMasterSource masterSource; } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java index ca94cdb61e7f..dc1e3b70103a 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsMasterSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -27,13 +26,11 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsMasterSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsMasterSource. 
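An illustrative aside, not part of this patch: the FactoryStorage enum touched above is the enum-holder singleton idiom all of these *SourceFactoryImpl classes use; the single INSTANCE constant carries the lazily built source, so every caller receives the same object. A stripped-down illustration with invented names and a plain Object standing in for the real source type:

public class ExampleSourceFactory {
  // An enum has exactly one INSTANCE per JVM, so its field is a safe singleton cache.
  private enum FactoryStorage {
    INSTANCE;

    Object source;
  }

  public synchronized Object create() {
    if (FactoryStorage.INSTANCE.source == null) {
      FactoryStorage.INSTANCE.source = new Object(); // real factories build the MetricsMasterSource here
    }
    return FactoryStorage.INSTANCE.source;
  }
}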
Implements BaseSource through BaseSourceImpl, + * following the pattern */ @InterfaceAudience.Private -public class MetricsMasterSourceImpl - extends BaseSourceImpl implements MetricsMasterSource { +public class MetricsMasterSourceImpl extends BaseSourceImpl implements MetricsMasterSource { private final MetricsMasterWrapper masterWrapper; private MutableFastCounter clusterRequestsCounter; @@ -41,18 +38,11 @@ public class MetricsMasterSourceImpl private OperationMetrics serverCrashMetrics; public MetricsMasterSourceImpl(MetricsMasterWrapper masterWrapper) { - this(METRICS_NAME, - METRICS_DESCRIPTION, - METRICS_CONTEXT, - METRICS_JMX_CONTEXT, - masterWrapper); + this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, masterWrapper); } - public MetricsMasterSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsMasterWrapper masterWrapper) { + public MetricsMasterSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsMasterWrapper masterWrapper) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.masterWrapper = masterWrapper; @@ -64,9 +54,9 @@ public void init() { clusterRequestsCounter = metricsRegistry.newCounter(CLUSTER_REQUESTS_NAME, "", 0L); /* - * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is - * moving away from using Hadoop's metric2 to having independent HBase specific Metrics. Use - * {@link BaseSourceImpl#registry} to register the new metrics. + * NOTE: Please refer to HBASE-9774 and HBASE-14282. Based on these two issues, HBase is moving + * away from using Hadoop's metric2 to having independent HBase specific Metrics. Use {@link + * BaseSourceImpl#registry} to register the new metrics. 
*/ serverCrashMetrics = new OperationMetrics(registry, SERVER_CRASH_METRIC_PREFIX); } @@ -87,42 +77,41 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { if (masterWrapper != null && masterWrapper.isRunning()) { metricsRecordBuilder .addGauge(Interns.info(MERGE_PLAN_COUNT_NAME, MERGE_PLAN_COUNT_DESC), - masterWrapper.getMergePlanCount()) + masterWrapper.getMergePlanCount()) .addGauge(Interns.info(SPLIT_PLAN_COUNT_NAME, SPLIT_PLAN_COUNT_DESC), - masterWrapper.getSplitPlanCount()) - .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, - MASTER_ACTIVE_TIME_DESC), masterWrapper.getActiveTime()) - .addGauge(Interns.info(MASTER_START_TIME_NAME, - MASTER_START_TIME_DESC), masterWrapper.getStartTime()) - .addGauge(Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, - MASTER_FINISHED_INITIALIZATION_TIME_DESC), - masterWrapper.getMasterInitializationTime()) + masterWrapper.getSplitPlanCount()) + .addGauge(Interns.info(MASTER_ACTIVE_TIME_NAME, MASTER_ACTIVE_TIME_DESC), + masterWrapper.getActiveTime()) + .addGauge(Interns.info(MASTER_START_TIME_NAME, MASTER_START_TIME_DESC), + masterWrapper.getStartTime()) + .addGauge( + Interns.info(MASTER_FINISHED_INITIALIZATION_TIME_NAME, + MASTER_FINISHED_INITIALIZATION_TIME_DESC), + masterWrapper.getMasterInitializationTime()) .addGauge(Interns.info(AVERAGE_LOAD_NAME, AVERAGE_LOAD_DESC), - masterWrapper.getAverageLoad()) + masterWrapper.getAverageLoad()) .tag(Interns.info(LIVE_REGION_SERVERS_NAME, LIVE_REGION_SERVERS_DESC), - masterWrapper.getRegionServers()) - .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, - NUMBER_OF_REGION_SERVERS_DESC), masterWrapper.getNumRegionServers()) + masterWrapper.getRegionServers()) + .addGauge(Interns.info(NUM_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), + masterWrapper.getNumRegionServers()) .tag(Interns.info(DEAD_REGION_SERVERS_NAME, DEAD_REGION_SERVERS_DESC), - masterWrapper.getDeadRegionServers()) - .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, - NUMBER_OF_DEAD_REGION_SERVERS_DESC), - masterWrapper.getNumDeadRegionServers()) + masterWrapper.getDeadRegionServers()) + .addGauge(Interns.info(NUM_DEAD_REGION_SERVERS_NAME, NUMBER_OF_DEAD_REGION_SERVERS_DESC), + masterWrapper.getNumDeadRegionServers()) .tag(Interns.info(DRAINING_REGION_SERVER_NAME, DRAINING_REGION_SERVER_DESC), - masterWrapper.getDrainingRegionServers()) + masterWrapper.getDrainingRegionServers()) .addGauge(Interns.info(NUM_DRAINING_REGION_SERVERS_NAME, NUMBER_OF_REGION_SERVERS_DESC), - masterWrapper.getNumDrainingRegionServers()) + masterWrapper.getNumDrainingRegionServers()) .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - masterWrapper.getZookeeperQuorum()) + masterWrapper.getZookeeperQuorum()) .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), masterWrapper.getServerName()) .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), masterWrapper.getClusterId()) - .tag(Interns.info(IS_ACTIVE_MASTER_NAME, - IS_ACTIVE_MASTER_DESC), - String.valueOf(masterWrapper.getIsActiveMaster())); + .tag(Interns.info(IS_ACTIVE_MASTER_NAME, IS_ACTIVE_MASTER_DESC), + String.valueOf(masterWrapper.getIsActiveMaster())); } metricsRegistry.snapshot(metricsRecordBuilder, all); - if(metricsAdapter != null) { + if (metricsAdapter != null) { metricsAdapter.snapshotAllMetrics(registry, metricsRecordBuilder); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java index 
7077f73ea47b..f84911a199b0 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/MetricsSnapshotSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -33,20 +32,18 @@ public MetricsSnapshotSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsSnapshotSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsSnapshotSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @Override public void init() { - snapshotTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); - snapshotCloneTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); - snapshotRestoreTimeHisto = metricsRegistry.newTimeHistogram( - SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); + snapshotTimeHisto = metricsRegistry.newTimeHistogram(SNAPSHOT_TIME_NAME, SNAPSHOT_TIME_DESC); + snapshotCloneTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_CLONE_TIME_NAME, SNAPSHOT_CLONE_TIME_DESC); + snapshotRestoreTimeHisto = + metricsRegistry.newTimeHistogram(SNAPSHOT_RESTORE_TIME_NAME, SNAPSHOT_RESTORE_TIME_DESC); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java index 7bccbb70d584..1e06514d502c 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
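An illustrative aside, not part of this patch: MetricsSnapshotSourceImpl above follows the two-step histogram pattern used throughout these sources, creating the histogram on DynamicMetricsRegistry in init() and recording samples with add(). A minimal sketch with invented names, assuming newTimeHistogram's result can be held as a MetricHistogram as the other sources here do:

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;

public class ExampleSnapshotSource extends BaseSourceImpl {
  private MetricHistogram exampleTimeHisto;

  public ExampleSnapshotSource() {
    super("ExampleSnapshot", "Example snapshot metrics", "master", "Master,sub=ExampleSnapshot");
  }

  @Override
  public void init() {
    super.init();
    exampleTimeHisto = metricsRegistry.newTimeHistogram("exampleSnapshotTime", "Example snapshot time");
  }

  public void updateExampleSnapshotTime(long millis) {
    exampleTimeHisto.add(millis); // one timing sample
  }
}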
*/ - package org.apache.hadoop.hbase.master.balancer; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -32,9 +31,8 @@ public MetricsBalancerSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsBalancerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, String metricsJmxContext) { + public MetricsBalancerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); updateBalancerStatus(true); } @@ -57,6 +55,6 @@ public void incrMiscInvocations() { @Override public void updateBalancerStatus(boolean status) { - metricsRegistry.tag(BALANCER_STATUS,"", String.valueOf(status), true); + metricsRegistry.tag(BALANCER_STATUS, "", String.valueOf(status), true); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java index de1dd81b17fa..8546799eb26f 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/master/balancer/MetricsStochasticBalancerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,19 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.master.balancer; import java.util.LinkedHashMap; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private -public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl implements - MetricsStochasticBalancerSource { +public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceImpl + implements MetricsStochasticBalancerSource { private static final String TABLE_FUNCTION_SEP = "_"; // Most Recently Used(MRU) cache @@ -38,14 +36,14 @@ public class MetricsStochasticBalancerSourceImpl extends MetricsBalancerSourceIm private int mruCap = calcMruCap(metricsSize); private final Map> stochasticCosts = - new LinkedHashMap>(mruCap, MRU_LOAD_FACTOR, true) { - private static final long serialVersionUID = 8204713453436906599L; + new LinkedHashMap>(mruCap, MRU_LOAD_FACTOR, true) { + private static final long serialVersionUID = 8204713453436906599L; - @Override - protected boolean removeEldestEntry(Map.Entry> eldest) { - return size() > mruCap; - } - }; + @Override + protected boolean removeEldestEntry(Map.Entry> eldest) { + return size() > mruCap; + } + }; private Map costFunctionDescs = new ConcurrentHashMap<>(); /** diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java index a90d810701c5..653982c8082e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java +++ 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/BaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.hbase.metrics.impl.GlobalMetricRegistriesAdapter; @@ -33,16 +32,16 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to - * DefaultMetricsSystem and creation of the metrics registry. - * - * All MetricsSource's in hbase-hadoop2-compat should derive from this class. + * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to + * DefaultMetricsSystem and creation of the metrics registry. All MetricsSource's in + * hbase-hadoop2-compat should derive from this class. */ @InterfaceAudience.Private public class BaseSourceImpl implements BaseSource, MetricsSource { private static enum DefaultMetricsSystemInitializer { INSTANCE; + private boolean inited = false; synchronized void init(String name) { @@ -62,10 +61,10 @@ synchronized void init(String name) { } /** - * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. - * Defining BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, - * however, for existing {@link BaseSource} implementations, please use the field - * named "registry" which is a {@link MetricRegistry} instance together with the + * @deprecated Use hbase-metrics/hbase-metrics-api module interfaces for new metrics. Defining + * BaseSources for new metric groups (WAL, RPC, etc) is not needed anymore, however, + * for existing {@link BaseSource} implementations, please use the field named + * "registry" which is a {@link MetricRegistry} instance together with the * {@link HBaseMetrics2HadoopMetricsAdapter}. */ @Deprecated @@ -77,17 +76,16 @@ synchronized void init(String name) { /** * Note that there are at least 4 MetricRegistry definitions in the source code. The first one is - * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork - * of the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of + * Hadoop Metrics2 MetricRegistry, second one is DynamicMetricsRegistry which is HBase's fork of + * the Hadoop metrics2 class. The third one is the dropwizard metrics implementation of * MetricRegistry, and finally a new API abstraction in HBase that is the * o.a.h.h.metrics.MetricRegistry class. This last one is the new way to use metrics within the - * HBase code. However, the others are in play because of existing metrics2 based code still - * needs to coexists until we get rid of all of our BaseSource and convert them to the new - * framework. Until that happens, new metrics can use the new API, but will be collected - * through the HBaseMetrics2HadoopMetricsAdapter class. - * - * BaseSourceImpl has two MetricRegistries. metricRegistry is for hadoop Metrics2 based - * metrics, while the registry is for hbase-metrics based metrics. + * HBase code. However, the others are in play because of existing metrics2 based code still needs + * to coexists until we get rid of all of our BaseSource and convert them to the new framework. 
+ * Until that happens, new metrics can use the new API, but will be collected through the + * HBaseMetrics2HadoopMetricsAdapter class. BaseSourceImpl has two MetricRegistries. + * metricRegistry is for hadoop Metrics2 based metrics, while the registry is for hbase-metrics + * based metrics. */ protected final MetricRegistry registry; @@ -101,10 +99,7 @@ synchronized void init(String name) { */ protected final HBaseMetrics2HadoopMetricsAdapter metricsAdapter; - public BaseSourceImpl( - String metricsName, - String metricsDescription, - String metricsContext, + public BaseSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { this.metricsName = metricsName; @@ -115,7 +110,7 @@ public BaseSourceImpl( metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext); DefaultMetricsSystemInitializer.INSTANCE.init(metricsName); - //Register this instance. + // Register this instance. DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this); // hbase-metrics module based metrics are registered in the hbase MetricsRegistry. @@ -132,9 +127,8 @@ public void init() { /** * Set a single gauge to a value. - * * @param gaugeName gauge name - * @param value the new value of the gauge. + * @param value the new value of the gauge. */ public void setGauge(String gaugeName, long value) { MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, value); @@ -143,9 +137,8 @@ public void setGauge(String gaugeName, long value) { /** * Add some amount to a gauge. - * * @param gaugeName The name of the gauge to increment. - * @param delta The amount to increment the gauge by. + * @param delta The amount to increment the gauge by. */ public void incGauge(String gaugeName, long delta) { MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L); @@ -154,9 +147,8 @@ public void incGauge(String gaugeName, long delta) { /** * Decrease the value of a named gauge. - * * @param gaugeName The name of the gauge. - * @param delta the ammount to subtract from a gauge value. + * @param delta the ammount to subtract from a gauge value. */ public void decGauge(String gaugeName, long delta) { MutableGaugeLong gaugeInt = metricsRegistry.getGauge(gaugeName, 0L); @@ -165,8 +157,7 @@ public void decGauge(String gaugeName, long delta) { /** * Increment a named counter by some value. - * - * @param key the name of the counter + * @param key the name of the counter * @param delta the ammount to increment */ public void incCounters(String key, long delta) { @@ -183,7 +174,6 @@ public void updateHistogram(String name, long value) { /** * Remove a named gauge. - * * @param key the key of the gauge to remove */ public void removeMetric(String key) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java index a4e75ba0137e..624d5be7b4f3 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/ExceptionTrackingSourceImpl.java @@ -15,19 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. 
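An illustrative aside, not part of this patch: the BaseSourceImpl helpers reformatted above (setGauge, incGauge, decGauge, incCounters, updateHistogram, removeMetric) are the lowest-level way a source touches the metrics2 DynamicMetricsRegistry. A minimal sketch of a subclass using them; the source and metric names are invented:

import org.apache.hadoop.hbase.metrics.BaseSourceImpl;

public class ExampleRequestSource extends BaseSourceImpl {
  public ExampleRequestSource() {
    super("ExampleRequests", "Example request metrics", "example", "Example,sub=Requests");
  }

  public void recordRequest(long latencyMs) {
    incCounters("requests", 1);                   // bump the named counter
    updateHistogram("requestLatency", latencyMs); // record one latency sample
    setGauge("lastRequestLatency", latencyMs);    // overwrite the gauge value
  }
}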
*/ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.lib.MutableFastCounter; import org.apache.yetus.audience.InterfaceAudience; /** - * Common base implementation for metrics sources which need to track exceptions thrown or - * received. + * Common base implementation for metrics sources which need to track exceptions thrown or received. */ @InterfaceAudience.Private -public class ExceptionTrackingSourceImpl extends BaseSourceImpl - implements ExceptionTrackingSource { +public class ExceptionTrackingSourceImpl extends BaseSourceImpl implements ExceptionTrackingSource { protected MutableFastCounter exceptions; protected MutableFastCounter exceptionsOOO; protected MutableFastCounter exceptionsBusy; @@ -46,7 +43,7 @@ public class ExceptionTrackingSourceImpl extends BaseSourceImpl protected MutableFastCounter otherExceptions; public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription, - String metricsContext, String metricsJmxContext) { + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -54,36 +51,36 @@ public ExceptionTrackingSourceImpl(String metricsName, String metricsDescription public void init() { super.init(); this.exceptions = this.getMetricsRegistry().newCounter(EXCEPTIONS_NAME, EXCEPTIONS_DESC, 0L); - this.exceptionsOOO = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsBusy = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsUnknown = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsOOO = + this.getMetricsRegistry().newCounter(EXCEPTIONS_OOO_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsBusy = + this.getMetricsRegistry().newCounter(EXCEPTIONS_BUSY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsUnknown = + this.getMetricsRegistry().newCounter(EXCEPTIONS_UNKNOWN_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsScannerReset = this.getMetricsRegistry() .newCounter(EXCEPTIONS_SCANNER_RESET_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsSanity = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsMoved = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsNSRE = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsSanity = + this.getMetricsRegistry().newCounter(EXCEPTIONS_SANITY_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsMoved = + this.getMetricsRegistry().newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsNSRE = + this.getMetricsRegistry().newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L); this.exceptionsMultiTooLarge = this.getMetricsRegistry() .newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L); this.exceptionsCallQueueTooBig = this.getMetricsRegistry() .newCounter(EXCEPTIONS_CALL_QUEUE_TOO_BIG, EXCEPTIONS_CALL_QUEUE_TOO_BIG_DESC, 0L); - this.exceptionsQuotaExceeded = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsRpcThrottling = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallDropped = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionsCallTimedOut = 
this.getMetricsRegistry() - .newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); - this.exceptionRequestTooBig = this.getMetricsRegistry() - .newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); - this.otherExceptions = this.getMetricsRegistry() - .newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsQuotaExceeded = + this.getMetricsRegistry().newCounter(EXCEPTIONS_QUOTA_EXCEEDED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsRpcThrottling = + this.getMetricsRegistry().newCounter(EXCEPTIONS_RPC_THROTTLING, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallDropped = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_DROPPED, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionsCallTimedOut = + this.getMetricsRegistry().newCounter(EXCEPTIONS_CALL_TIMED_OUT, EXCEPTIONS_TYPE_DESC, 0L); + this.exceptionRequestTooBig = + this.getMetricsRegistry().newCounter(EXCEPTIONS_REQUEST_TOO_BIG, EXCEPTIONS_TYPE_DESC, 0L); + this.otherExceptions = + this.getMetricsRegistry().newCounter(OTHER_EXCEPTIONS, EXCEPTIONS_TYPE_DESC, 0L); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java index 254d3b4a9719..cd7cc1ce244c 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/Interns.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsTag; import org.apache.yetus.audience.InterfaceAudience; @@ -53,11 +51,11 @@ public ConcurrentHashMap load(MetricsInfo key) { } }); - private Interns(){} + private Interns() { + } /** * Get a metric info object - * * @return an interned metric info object */ public static MetricsInfo info(String name, String description) { @@ -72,8 +70,7 @@ public static MetricsInfo info(String name, String description) { /** * Get a metrics tag - * - * @param info of the tag + * @param info of the tag * @param value of the tag * @return an interned metrics tag */ @@ -89,10 +86,9 @@ public static MetricsTag tag(MetricsInfo info, String value) { /** * Get a metrics tag - * - * @param name of the tag + * @param name of the tag * @param description of the tag - * @param value of the tag + * @param value of the tag * @return an interned metrics tag */ public static MetricsTag tag(String name, String description, String value) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java index a5ffe8fb5e2c..e383b0e1bbd7 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MBeanSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import javax.management.ObjectName; - import org.apache.hadoop.metrics2.util.MBeans; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java index 42d139cb4e5a..a1c3c7fd8320 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/MetricsInfoImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import org.apache.hadoop.metrics2.MetricsInfo; @@ -37,30 +36,33 @@ class MetricsInfoImpl implements MetricsInfo { this.description = Preconditions.checkNotNull(description, "description"); } - @Override public String name() { + @Override + public String name() { return name; } - @Override public String description() { + @Override + public String description() { return description; } - @Override public boolean equals(Object obj) { + @Override + public boolean equals(Object obj) { if (obj instanceof MetricsInfo) { MetricsInfo other = (MetricsInfo) obj; - return Objects.equal(name, other.name()) && - Objects.equal(description, other.description()); + return Objects.equal(name, other.name()) && Objects.equal(description, other.description()); } return false; } - @Override public int hashCode() { + @Override + public int hashCode() { return Objects.hashCode(name, description); } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("name", name).add("description", description) + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("name", name).add("description", description) .toString(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java index a816d4970449..a3f87818ce84 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/GlobalMetricRegistriesAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,13 +40,11 @@ /** * This class acts as an adapter to export the MetricRegistry's in the global registry. Each - * MetricRegistry will be registered or unregistered from the metric2 system. The collection will - * be performed via the MetricsSourceAdapter and the MetricRegistry will collected like a - * BaseSource instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's - * JMX context. - * - *
    Developer note: - * Unlike the current metrics2 based approach, the new metrics approach + * MetricRegistry will be registered or unregistered from the metric2 system. The collection will be + * performed via the MetricsSourceAdapter and the MetricRegistry will collected like a BaseSource + * instance for a group of metrics (like WAL, RPC, etc) with the MetricRegistryInfo's JMX context. + *
    + * Developer note: Unlike the current metrics2 based approach, the new metrics approach * (hbase-metrics-api and hbase-metrics modules) work by having different MetricRegistries that are * initialized and used from the code that lives in their respective modules (hbase-server, etc). * There is no need to define BaseSource classes and do a lot of indirection. The MetricRegistry'es @@ -54,7 +52,6 @@ * MetricRegistries.global() and register adapters to the metrics2 subsystem. These adapters then * report the actual values by delegating to * {@link HBaseMetrics2HadoopMetricsAdapter#snapshotAllMetrics(MetricRegistry, MetricsCollector)}. - * * We do not initialize the Hadoop Metrics2 system assuming that other BaseSources already do so * (see BaseSourceImpl). Once the last BaseSource is moved to the new system, the metric2 * initialization should be moved here. @@ -67,6 +64,7 @@ public final class GlobalMetricRegistriesAdapter { private class MetricsSourceAdapter implements MetricsSource { private final MetricRegistry registry; + MetricsSourceAdapter(MetricRegistry registry) { this.registry = registry; } @@ -135,7 +133,7 @@ private void doRun() { MetricsSourceAdapter adapter = new MetricsSourceAdapter(registry); LOG.info("Registering " + info.getMetricsJmxContext() + " " + info.getMetricsDescription()); DefaultMetricsSystem.instance().register(info.getMetricsJmxContext(), - info.getMetricsDescription(), adapter); + info.getMetricsDescription(), adapter); registeredSources.put(info, adapter); // next collection will collect the newly registered MetricSource. Doing this here leads to // ConcurrentModificationException. @@ -145,7 +143,7 @@ private void doRun() { boolean removed = false; // Remove registered sources if it is removed from the global registry for (Iterator> it = - registeredSources.entrySet().iterator(); it.hasNext();) { + registeredSources.entrySet().iterator(); it.hasNext();) { Entry entry = it.next(); MetricRegistryInfo info = entry.getKey(); Optional found = MetricRegistries.global().get(info); @@ -153,7 +151,7 @@ private void doRun() { if (LOG.isDebugEnabled()) { LOG.debug("Removing adapter for the MetricRegistry: " + info.getMetricsJmxContext()); } - synchronized(DefaultMetricsSystem.instance()) { + synchronized (DefaultMetricsSystem.instance()) { DefaultMetricsSystem.instance().unregisterSource(info.getMetricsJmxContext()); helper.removeSourceName(info.getMetricsJmxContext()); helper.removeObjectName(info.getMetricsJmxContext()); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java index 5fc2450cdb5e..d708a8296212 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/metrics/impl/HBaseMetrics2HadoopMetricsAdapter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -/* - * Copyright 2016 Josh Elser - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ package org.apache.hadoop.hbase.metrics.impl; import java.util.Map; @@ -53,16 +38,15 @@ /** * This is the adapter from "HBase Metrics Framework", implemented in hbase-metrics-api and - * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, - * but a helper to be able to collect all of the Metric's in the MetricRegistry using the - * MetricsCollector and MetricsRecordBuilder. - * - * Some of the code is forked from https://github.com/joshelser/dropwizard-hadoop-metrics2. + * hbase-metrics modules to the Hadoop Metrics2 framework. This adapter is not a metric source, but + * a helper to be able to collect all of the Metric's in the MetricRegistry using the + * MetricsCollector and MetricsRecordBuilder. Some of the code is forked from + * https://github.com/joshelser/dropwizard-hadoop-metrics2. */ @InterfaceAudience.Private public class HBaseMetrics2HadoopMetricsAdapter { - private static final Logger LOG - = LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); + private static final Logger LOG = + LoggerFactory.getLogger(HBaseMetrics2HadoopMetricsAdapter.class); private static final String EMPTY_STRING = ""; public HBaseMetrics2HadoopMetricsAdapter() { @@ -70,14 +54,12 @@ public HBaseMetrics2HadoopMetricsAdapter() { /** * Iterates over the MetricRegistry and adds them to the {@code collector}. - * * @param collector A metrics collector */ - public void snapshotAllMetrics(MetricRegistry metricRegistry, - MetricsCollector collector) { + public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsCollector collector) { MetricRegistryInfo info = metricRegistry.getMetricRegistryInfo(); - MetricsRecordBuilder builder = collector.addRecord(Interns.info(info.getMetricsName(), - info.getMetricsDescription())); + MetricsRecordBuilder builder = + collector.addRecord(Interns.info(info.getMetricsName(), info.getMetricsDescription())); builder.setContext(info.getMetricsContext()); snapshotAllMetrics(metricRegistry, builder); @@ -85,13 +67,12 @@ public void snapshotAllMetrics(MetricRegistry metricRegistry, /** * Iterates over the MetricRegistry and adds them to the {@code builder}. 
- * * @param builder A record builder */ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuilder builder) { Map metrics = metricRegistry.getMetrics(); - for (Map.Entry e: metrics.entrySet()) { + for (Map.Entry e : metrics.entrySet()) { // Always capitalize the name String name = StringUtils.capitalize(e.getKey()); Metric metric = e.getValue(); @@ -99,13 +80,13 @@ public void snapshotAllMetrics(MetricRegistry metricRegistry, MetricsRecordBuild if (metric instanceof Gauge) { addGauge(name, (Gauge) metric, builder); } else if (metric instanceof Counter) { - addCounter(name, (Counter)metric, builder); + addCounter(name, (Counter) metric, builder); } else if (metric instanceof Histogram) { - addHistogram(name, (Histogram)metric, builder); + addHistogram(name, (Histogram) metric, builder); } else if (metric instanceof Meter) { - addMeter(name, (Meter)metric, builder); + addMeter(name, (Meter) metric, builder); } else if (metric instanceof Timer) { - addTimer(name, (Timer)metric, builder); + addTimer(name, (Timer) metric, builder); } else { LOG.info("Ignoring unknown Metric class " + metric.getClass().getName()); } @@ -137,7 +118,6 @@ private void addCounter(String name, Counter counter, MetricsRecordBuilder build /** * Add Histogram value-distribution data to a Hadoop-Metrics2 record building. - * * @param name A base name for this record. * @param histogram A histogram to measure distribution of values. * @param builder A Hadoop-Metrics2 record builder. @@ -149,7 +129,6 @@ private void addHistogram(String name, Histogram histogram, MetricsRecordBuilder /** * Add Dropwizard-Metrics rate information to a Hadoop-Metrics2 record builder, converting the * rates to the appropriate unit. - * * @param builder A Hadoop-Metrics2 record builder. * @param name A base name for this record. */ @@ -159,7 +138,7 @@ private void addMeter(String name, Meter meter, MetricsRecordBuilder builder) { builder.addGauge(Interns.info(name + "_1min_rate", EMPTY_STRING), meter.getOneMinuteRate()); builder.addGauge(Interns.info(name + "_5min_rate", EMPTY_STRING), meter.getFiveMinuteRate()); builder.addGauge(Interns.info(name + "_15min_rate", EMPTY_STRING), - meter.getFifteenMinuteRate()); + meter.getFifteenMinuteRate()); } private void addTimer(String name, Timer timer, MetricsRecordBuilder builder) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java index 047f8e13b1e1..4fbeefc3ea8e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsHeapMemoryManagerSourceImpl.java @@ -1,20 +1,19 @@ /* - * Copyright The Apache Software Foundation + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.regionserver; @@ -29,8 +28,8 @@ * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl implements - MetricsHeapMemoryManagerSource { +public class MetricsHeapMemoryManagerSourceImpl extends BaseSourceImpl + implements MetricsHeapMemoryManagerSource { private final MetricHistogram blockedFlushHistogram; private final MetricHistogram unblockedFlushHistogram; @@ -56,35 +55,34 @@ public MetricsHeapMemoryManagerSourceImpl(String metricsName, String metricsDesc super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Histograms - blockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); - unblockedFlushHistogram = getMetricsRegistry() - .newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); - incMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); - decMemStoreSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); - incBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, INC_BLOCKCACHE_TUNING_DESC); - decBlockCacheSizeHistogram = getMetricsRegistry() - .newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, DEC_BLOCKCACHE_TUNING_DESC); + blockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(BLOCKED_FLUSH_NAME, BLOCKED_FLUSH_DESC); + unblockedFlushHistogram = + getMetricsRegistry().newSizeHistogram(UNBLOCKED_FLUSH_NAME, UNBLOCKED_FLUSH_DESC); + incMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(INC_MEMSTORE_TUNING_NAME, INC_MEMSTORE_TUNING_DESC); + decMemStoreSizeHistogram = + getMetricsRegistry().newSizeHistogram(DEC_MEMSTORE_TUNING_NAME, DEC_MEMSTORE_TUNING_DESC); + incBlockCacheSizeHistogram = getMetricsRegistry().newSizeHistogram(INC_BLOCKCACHE_TUNING_NAME, + INC_BLOCKCACHE_TUNING_DESC); + decBlockCacheSizeHistogram = getMetricsRegistry().newSizeHistogram(DEC_BLOCKCACHE_TUNING_NAME, + DEC_BLOCKCACHE_TUNING_DESC); // Gauges - blockedFlushGauge = getMetricsRegistry() - .newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); - unblockedFlushGauge = getMetricsRegistry() - .newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); - memStoreSizeGauge = getMetricsRegistry() - .newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); - blockCacheSizeGauge = getMetricsRegistry() - .newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, 
BLOCKCACHE_SIZE_GAUGE_DESC, 0L); + blockedFlushGauge = + getMetricsRegistry().newGauge(BLOCKED_FLUSH_GAUGE_NAME, BLOCKED_FLUSH_GAUGE_DESC, 0L); + unblockedFlushGauge = + getMetricsRegistry().newGauge(UNBLOCKED_FLUSH_GAUGE_NAME, UNBLOCKED_FLUSH_GAUGE_DESC, 0L); + memStoreSizeGauge = + getMetricsRegistry().newGauge(MEMSTORE_SIZE_GAUGE_NAME, MEMSTORE_SIZE_GAUGE_DESC, 0L); + blockCacheSizeGauge = + getMetricsRegistry().newGauge(BLOCKCACHE_SIZE_GAUGE_NAME, BLOCKCACHE_SIZE_GAUGE_DESC, 0L); // Counters - doNothingCounter = getMetricsRegistry() - .newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); - aboveHeapOccupancyLowWatermarkCounter = getMetricsRegistry() - .newCounter(ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, - ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); + doNothingCounter = + getMetricsRegistry().newCounter(DO_NOTHING_COUNTER_NAME, DO_NOTHING_COUNTER_DESC, 0L); + aboveHeapOccupancyLowWatermarkCounter = getMetricsRegistry().newCounter( + ABOVE_HEAP_LOW_WATERMARK_COUNTER_NAME, ABOVE_HEAP_LOW_WATERMARK_COUNTER_DESC, 0L); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java index 44ce40b16c2c..43f293357b5c 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -47,11 +45,8 @@ public MetricsRegionAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - - public MetricsRegionAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRegionAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // Every few mins clean the JMX cache. @@ -75,9 +70,8 @@ public void deregister(MetricsRegionSource toRemove) { } catch (Exception e) { // Ignored. If this errors out it means that someone is double // closing the region source and the region is already nulled out. - LOG.info( - "Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), - e); + LOG.info("Error trying to remove " + toRemove + " from " + this.getClass().getSimpleName(), + e); } clearCache(); } @@ -87,12 +81,11 @@ private synchronized void clearCache() { } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. 
Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector - * @param all get all the metrics regardless of when they last changed. + * @param all get all the metrics regardless of when they last changed. */ @Override public void getMetrics(MetricsCollector collector, boolean all) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java index 3a796ddf0c5f..7c1cc0670455 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerQuotaSourceImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,7 +18,6 @@ package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Meter; @@ -28,8 +28,8 @@ * Implementation of {@link MetricsRegionServerQuotaSource}. */ @InterfaceAudience.Private -public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl implements - MetricsRegionServerQuotaSource { +public class MetricsRegionServerQuotaSourceImpl extends BaseSourceImpl + implements MetricsRegionServerQuotaSource { private final Meter tablesInViolationCounter; private final Meter spaceQuotaSnapshotsReceived; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java index ccc17492dba5..1a9c8a9e42f1 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,12 +23,13 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper + * Factory to create MetricsRegionServerSource when given a MetricsRegionServerWrapper */ @InterfaceAudience.Private public class MetricsRegionServerSourceFactoryImpl implements MetricsRegionServerSourceFactory { public static enum FactoryStorage { INSTANCE; + private Object aggLock = new Object(); private MetricsRegionAggregateSourceImpl regionAggImpl; private MetricsUserAggregateSourceImpl userAggImpl; @@ -75,8 +76,8 @@ public synchronized MetricsHeapMemoryManagerSource getHeapMemoryManager() { } @Override - public synchronized MetricsRegionServerSource createServer( - MetricsRegionServerWrapper regionServerWrapper) { + public synchronized MetricsRegionServerSource + createServer(MetricsRegionServerWrapper regionServerWrapper) { return new MetricsRegionServerSourceImpl(regionServerWrapper); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java index e9ff31f29d45..f581879cf734 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -27,13 +26,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop2 implementation of MetricsRegionServerSource. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop2 implementation of MetricsRegionServerSource. 
Implements BaseSource through + * BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsRegionServerSourceImpl - extends BaseSourceImpl implements MetricsRegionServerSource { +public class MetricsRegionServerSourceImpl extends BaseSourceImpl + implements MetricsRegionServerSource { final MetricsRegionServerWrapper rsWrap; private final MetricHistogram putHisto; @@ -97,11 +95,8 @@ public MetricsRegionServerSourceImpl(MetricsRegionServerWrapper rsWrap) { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT, rsWrap); } - public MetricsRegionServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext, - MetricsRegionServerWrapper rsWrap) { + public MetricsRegionServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext, MetricsRegionServerWrapper rsWrap) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); this.rsWrap = rsWrap; @@ -131,58 +126,59 @@ public MetricsRegionServerSourceImpl(String metricsName, scanTimeHisto = getMetricsRegistry().newTimeHistogram(SCAN_TIME_KEY); flushTimeHisto = getMetricsRegistry().newTimeHistogram(FLUSH_TIME, FLUSH_TIME_DESC); - flushMemstoreSizeHisto = getMetricsRegistry() - .newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); - flushOutputSizeHisto = getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, - FLUSH_OUTPUT_SIZE_DESC); - flushedOutputBytes = getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, - FLUSHED_OUTPUT_BYTES_DESC, 0L); - flushedMemstoreBytes = getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, - FLUSHED_MEMSTORE_BYTES_DESC, 0L); - - compactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); - compactionInputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_INPUT_FILE_COUNT, COMPACTION_INPUT_FILE_COUNT_DESC); - compactionInputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); - compactionOutputFileCountHisto = getMetricsRegistry() - .newHistogram(COMPACTION_OUTPUT_FILE_COUNT, COMPACTION_OUTPUT_FILE_COUNT_DESC); - compactionOutputSizeHisto = getMetricsRegistry() - .newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); - compactedInputBytes = getMetricsRegistry() - .newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); - compactedOutputBytes = getMetricsRegistry() - .newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); - - majorCompactionTimeHisto = getMetricsRegistry() - .newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); + flushMemstoreSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_MEMSTORE_SIZE, FLUSH_MEMSTORE_SIZE_DESC); + flushOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(FLUSH_OUTPUT_SIZE, FLUSH_OUTPUT_SIZE_DESC); + flushedOutputBytes = + getMetricsRegistry().newCounter(FLUSHED_OUTPUT_BYTES, FLUSHED_OUTPUT_BYTES_DESC, 0L); + flushedMemstoreBytes = + getMetricsRegistry().newCounter(FLUSHED_MEMSTORE_BYTES, FLUSHED_MEMSTORE_BYTES_DESC, 0L); + + compactionTimeHisto = + getMetricsRegistry().newTimeHistogram(COMPACTION_TIME, COMPACTION_TIME_DESC); + compactionInputFileCountHisto = getMetricsRegistry().newHistogram(COMPACTION_INPUT_FILE_COUNT, + COMPACTION_INPUT_FILE_COUNT_DESC); + compactionInputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_INPUT_SIZE, COMPACTION_INPUT_SIZE_DESC); + compactionOutputFileCountHisto = 
getMetricsRegistry().newHistogram(COMPACTION_OUTPUT_FILE_COUNT, + COMPACTION_OUTPUT_FILE_COUNT_DESC); + compactionOutputSizeHisto = + getMetricsRegistry().newSizeHistogram(COMPACTION_OUTPUT_SIZE, COMPACTION_OUTPUT_SIZE_DESC); + compactedInputBytes = + getMetricsRegistry().newCounter(COMPACTED_INPUT_BYTES, COMPACTED_INPUT_BYTES_DESC, 0L); + compactedOutputBytes = + getMetricsRegistry().newCounter(COMPACTED_OUTPUT_BYTES, COMPACTED_OUTPUT_BYTES_DESC, 0L); + + majorCompactionTimeHisto = + getMetricsRegistry().newTimeHistogram(MAJOR_COMPACTION_TIME, MAJOR_COMPACTION_TIME_DESC); majorCompactionInputFileCountHisto = getMetricsRegistry() - .newHistogram(MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC); + .newHistogram(MAJOR_COMPACTION_INPUT_FILE_COUNT, MAJOR_COMPACTION_INPUT_FILE_COUNT_DESC); majorCompactionInputSizeHisto = getMetricsRegistry() .newSizeHistogram(MAJOR_COMPACTION_INPUT_SIZE, MAJOR_COMPACTION_INPUT_SIZE_DESC); majorCompactionOutputFileCountHisto = getMetricsRegistry() .newHistogram(MAJOR_COMPACTION_OUTPUT_FILE_COUNT, MAJOR_COMPACTION_OUTPUT_FILE_COUNT_DESC); majorCompactionOutputSizeHisto = getMetricsRegistry() - .newSizeHistogram(MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); - majorCompactedInputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_INPUT_BYTES, MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); - majorCompactedOutputBytes = getMetricsRegistry() - .newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); + .newSizeHistogram(MAJOR_COMPACTION_OUTPUT_SIZE, MAJOR_COMPACTION_OUTPUT_SIZE_DESC); + majorCompactedInputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_INPUT_BYTES, + MAJOR_COMPACTED_INPUT_BYTES_DESC, 0L); + majorCompactedOutputBytes = getMetricsRegistry().newCounter(MAJOR_COMPACTED_OUTPUT_BYTES, + MAJOR_COMPACTED_OUTPUT_BYTES_DESC, 0L); splitTimeHisto = getMetricsRegistry().newTimeHistogram(SPLIT_KEY); splitRequest = getMetricsRegistry().newCounter(SPLIT_REQUEST_KEY, SPLIT_REQUEST_DESC, 0L); splitSuccess = getMetricsRegistry().newCounter(SPLIT_SUCCESS_KEY, SPLIT_SUCCESS_DESC, 0L); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); - scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); + scannerLeaseExpiredCount = getMetricsRegistry().newCounter(SCANNER_LEASE_EXPIRED_COUNT, + SCANNER_LEASE_EXPIRED_COUNT_DESC, 0L); } @Override @@ -332,12 +328,11 @@ public void incrScannerLeaseExpired() { } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. 
- * * @param metricsCollector Collector to accept metrics - * @param all push all or only changed? + * @param all push all or only changed? */ @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { @@ -346,135 +341,133 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { // rsWrap can be null because this function is called inside of init. if (rsWrap != null) { addGaugesToMetricsRecordBuilder(mrb) - .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), - rsWrap.getTotalRequestCount()) - .addCounter(Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, - TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), rsWrap.getTotalRowActionRequestCount()) - .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), - rsWrap.getReadRequestsCount()) - .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, - FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount()) - .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), - rsWrap.getWriteRequestsCount()) - .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), - rsWrap.getRpcGetRequestsCount()) - .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcFullScanRequestsCount()) - .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), - rsWrap.getRpcScanRequestsCount()) - .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), - rsWrap.getRpcMultiRequestsCount()) - .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), - rsWrap.getRpcMutateRequestsCount()) - .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksFailed()) - .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), - rsWrap.getCheckAndMutateChecksPassed()) - .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), - rsWrap.getBlockCacheHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, - BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), rsWrap.getBlockCachePrimaryHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), - rsWrap.getBlockCacheMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, - BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), rsWrap.getBlockCachePrimaryMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), - rsWrap.getBlockCacheEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, - BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), - rsWrap.getBlockCachePrimaryEvictedCount()) - .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, - BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), - rsWrap.getBlockCacheFailedInsertions()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), - rsWrap.getDataMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), - rsWrap.getLeafIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), - rsWrap.getBloomChunkMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), - rsWrap.getMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), - rsWrap.getRootIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), - rsWrap.getIntermediateIndexMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), - rsWrap.getFileInfoMissCount()) - 
.addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), - rsWrap.getGeneralBloomMetaMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), - rsWrap.getDeleteFamilyBloomMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), - rsWrap.getTrailerMissCount()) - .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), - rsWrap.getDataHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), - rsWrap.getLeafIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), - rsWrap.getBloomChunkHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), - rsWrap.getMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), - rsWrap.getRootIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), - rsWrap.getIntermediateIndexHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), - rsWrap.getFileInfoHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), - rsWrap.getGeneralBloomMetaHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), - rsWrap.getDeleteFamilyBloomHitCount()) - .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), - rsWrap.getTrailerHitCount()) - .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), - rsWrap.getUpdatesBlockedTime()) - .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), - rsWrap.getFlushedCellsCount()) - .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), - rsWrap.getCompactedCellsCount()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), - rsWrap.getMajorCompactedCellsCount()) - .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), - rsWrap.getFlushedCellsSize()) - .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), - rsWrap.getCompactedCellsSize()) - .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), - rsWrap.getMajorCompactedCellsSize()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, - CELLS_COUNT_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsCountCompactedFromMob()) - .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, - CELLS_COUNT_COMPACTED_TO_MOB_DESC), rsWrap.getCellsCountCompactedToMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, - CELLS_SIZE_COMPACTED_FROM_MOB_DESC), rsWrap.getCellsSizeCompactedFromMob()) - .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, - CELLS_SIZE_COMPACTED_TO_MOB_DESC), rsWrap.getCellsSizeCompactedToMob()) - .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), - rsWrap.getMobFlushCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), - rsWrap.getMobFlushedCellsCount()) - .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), - rsWrap.getMobFlushedCellsSize()) - .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), - rsWrap.getMobScanCellsCount()) - .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), - rsWrap.getMobScanCellsSize()) - .addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, - MOB_FILE_CACHE_ACCESS_COUNT_DESC), rsWrap.getMobFileCacheAccessCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), - rsWrap.getMobFileCacheMissCount()) - .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, - MOB_FILE_CACHE_EVICTED_COUNT_DESC), 
rsWrap.getMobFileCacheEvictedCount()) - .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) - .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), - rsWrap.getHedgedReadWins()) - .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), - rsWrap.getHedgedReadOpsInCurThread()) - .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), - rsWrap.getBlockedRequestsCount()) - .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), - rsWrap.getZookeeperQuorum()) - .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) - .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); + .addCounter(Interns.info(TOTAL_REQUEST_COUNT, TOTAL_REQUEST_COUNT_DESC), + rsWrap.getTotalRequestCount()) + .addCounter( + Interns.info(TOTAL_ROW_ACTION_REQUEST_COUNT, TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), + rsWrap.getTotalRowActionRequestCount()) + .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC), + rsWrap.getReadRequestsCount()) + .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC), + rsWrap.getFilteredReadRequestsCount()) + .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC), + rsWrap.getWriteRequestsCount()) + .addCounter(Interns.info(RPC_GET_REQUEST_COUNT, RPC_GET_REQUEST_COUNT_DESC), + rsWrap.getRpcGetRequestsCount()) + .addCounter(Interns.info(RPC_FULL_SCAN_REQUEST_COUNT, RPC_FULL_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcFullScanRequestsCount()) + .addCounter(Interns.info(RPC_SCAN_REQUEST_COUNT, RPC_SCAN_REQUEST_COUNT_DESC), + rsWrap.getRpcScanRequestsCount()) + .addCounter(Interns.info(RPC_MULTI_REQUEST_COUNT, RPC_MULTI_REQUEST_COUNT_DESC), + rsWrap.getRpcMultiRequestsCount()) + .addCounter(Interns.info(RPC_MUTATE_REQUEST_COUNT, RPC_MUTATE_REQUEST_COUNT_DESC), + rsWrap.getRpcMutateRequestsCount()) + .addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksFailed()) + .addCounter(Interns.info(CHECK_MUTATE_PASSED_COUNT, CHECK_MUTATE_PASSED_COUNT_DESC), + rsWrap.getCheckAndMutateChecksPassed()) + .addCounter(Interns.info(BLOCK_CACHE_HIT_COUNT, BLOCK_CACHE_HIT_COUNT_DESC), + rsWrap.getBlockCacheHitCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_HIT_COUNT, BLOCK_CACHE_PRIMARY_HIT_COUNT_DESC), + rsWrap.getBlockCachePrimaryHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_MISS_COUNT, BLOCK_COUNT_MISS_COUNT_DESC), + rsWrap.getBlockCacheMissCount()) + .addCounter( + Interns.info(BLOCK_CACHE_PRIMARY_MISS_COUNT, BLOCK_COUNT_PRIMARY_MISS_COUNT_DESC), + rsWrap.getBlockCachePrimaryMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_EVICTION_COUNT, BLOCK_CACHE_EVICTION_COUNT_DESC), + rsWrap.getBlockCacheEvictedCount()) + .addCounter(Interns.info(BLOCK_CACHE_PRIMARY_EVICTION_COUNT, + BLOCK_CACHE_PRIMARY_EVICTION_COUNT_DESC), rsWrap.getBlockCachePrimaryEvictedCount()) + .addCounter(Interns.info(BLOCK_CACHE_FAILED_INSERTION_COUNT, + BLOCK_CACHE_FAILED_INSERTION_COUNT_DESC), rsWrap.getBlockCacheFailedInsertions()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_MISS_COUNT, ""), rsWrap.getDataMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_MISS_COUNT, ""), + rsWrap.getLeafIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_MISS_COUNT, ""), + rsWrap.getBloomChunkMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_MISS_COUNT, ""), rsWrap.getMetaMissCount()) + 
.addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_MISS_COUNT, ""), + rsWrap.getRootIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_MISS_COUNT, ""), + rsWrap.getIntermediateIndexMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_MISS_COUNT, ""), + rsWrap.getFileInfoMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_MISS_COUNT, ""), + rsWrap.getGeneralBloomMetaMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_MISS_COUNT, ""), + rsWrap.getDeleteFamilyBloomMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_MISS_COUNT, ""), + rsWrap.getTrailerMissCount()) + .addCounter(Interns.info(BLOCK_CACHE_DATA_HIT_COUNT, ""), rsWrap.getDataHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_LEAF_INDEX_HIT_COUNT, ""), + rsWrap.getLeafIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_BLOOM_CHUNK_HIT_COUNT, ""), + rsWrap.getBloomChunkHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_META_HIT_COUNT, ""), rsWrap.getMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_ROOT_INDEX_HIT_COUNT, ""), + rsWrap.getRootIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_INTERMEDIATE_INDEX_HIT_COUNT, ""), + rsWrap.getIntermediateIndexHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_FILE_INFO_HIT_COUNT, ""), + rsWrap.getFileInfoHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_GENERAL_BLOOM_META_HIT_COUNT, ""), + rsWrap.getGeneralBloomMetaHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_DELETE_FAMILY_BLOOM_HIT_COUNT, ""), + rsWrap.getDeleteFamilyBloomHitCount()) + .addCounter(Interns.info(BLOCK_CACHE_TRAILER_HIT_COUNT, ""), rsWrap.getTrailerHitCount()) + .addCounter(Interns.info(UPDATES_BLOCKED_TIME, UPDATES_BLOCKED_DESC), + rsWrap.getUpdatesBlockedTime()) + .addCounter(Interns.info(FLUSHED_CELLS, FLUSHED_CELLS_DESC), + rsWrap.getFlushedCellsCount()) + .addCounter(Interns.info(COMPACTED_CELLS, COMPACTED_CELLS_DESC), + rsWrap.getCompactedCellsCount()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS, MAJOR_COMPACTED_CELLS_DESC), + rsWrap.getMajorCompactedCellsCount()) + .addCounter(Interns.info(FLUSHED_CELLS_SIZE, FLUSHED_CELLS_SIZE_DESC), + rsWrap.getFlushedCellsSize()) + .addCounter(Interns.info(COMPACTED_CELLS_SIZE, COMPACTED_CELLS_SIZE_DESC), + rsWrap.getCompactedCellsSize()) + .addCounter(Interns.info(MAJOR_COMPACTED_CELLS_SIZE, MAJOR_COMPACTED_CELLS_SIZE_DESC), + rsWrap.getMajorCompactedCellsSize()) + .addCounter( + Interns.info(CELLS_COUNT_COMPACTED_FROM_MOB, CELLS_COUNT_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsCountCompactedFromMob()) + .addCounter(Interns.info(CELLS_COUNT_COMPACTED_TO_MOB, CELLS_COUNT_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsCountCompactedToMob()) + .addCounter( + Interns.info(CELLS_SIZE_COMPACTED_FROM_MOB, CELLS_SIZE_COMPACTED_FROM_MOB_DESC), + rsWrap.getCellsSizeCompactedFromMob()) + .addCounter(Interns.info(CELLS_SIZE_COMPACTED_TO_MOB, CELLS_SIZE_COMPACTED_TO_MOB_DESC), + rsWrap.getCellsSizeCompactedToMob()) + .addCounter(Interns.info(MOB_FLUSH_COUNT, MOB_FLUSH_COUNT_DESC), + rsWrap.getMobFlushCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_COUNT, MOB_FLUSHED_CELLS_COUNT_DESC), + rsWrap.getMobFlushedCellsCount()) + .addCounter(Interns.info(MOB_FLUSHED_CELLS_SIZE, MOB_FLUSHED_CELLS_SIZE_DESC), + rsWrap.getMobFlushedCellsSize()) + .addCounter(Interns.info(MOB_SCAN_CELLS_COUNT, MOB_SCAN_CELLS_COUNT_DESC), + rsWrap.getMobScanCellsCount()) + .addCounter(Interns.info(MOB_SCAN_CELLS_SIZE, MOB_SCAN_CELLS_SIZE_DESC), + rsWrap.getMobScanCellsSize()) + 
.addCounter(Interns.info(MOB_FILE_CACHE_ACCESS_COUNT, MOB_FILE_CACHE_ACCESS_COUNT_DESC), + rsWrap.getMobFileCacheAccessCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_MISS_COUNT, MOB_FILE_CACHE_MISS_COUNT_DESC), + rsWrap.getMobFileCacheMissCount()) + .addCounter(Interns.info(MOB_FILE_CACHE_EVICTED_COUNT, MOB_FILE_CACHE_EVICTED_COUNT_DESC), + rsWrap.getMobFileCacheEvictedCount()) + .addCounter(Interns.info(HEDGED_READS, HEDGED_READS_DESC), rsWrap.getHedgedReadOps()) + .addCounter(Interns.info(HEDGED_READ_WINS, HEDGED_READ_WINS_DESC), + rsWrap.getHedgedReadWins()) + .addCounter(Interns.info(HEDGED_READ_IN_CUR_THREAD, HEDGED_READ_IN_CUR_THREAD_DESC), + rsWrap.getHedgedReadOpsInCurThread()) + .addCounter(Interns.info(BLOCKED_REQUESTS_COUNT, BLOCKED_REQUESTS_COUNT_DESC), + rsWrap.getBlockedRequestsCount()) + .tag(Interns.info(ZOOKEEPER_QUORUM_NAME, ZOOKEEPER_QUORUM_DESC), + rsWrap.getZookeeperQuorum()) + .tag(Interns.info(SERVER_NAME_NAME, SERVER_NAME_DESC), rsWrap.getServerName()) + .tag(Interns.info(CLUSTER_ID_NAME, CLUSTER_ID_DESC), rsWrap.getClusterId()); } metricsRegistry.snapshot(mrb, all); @@ -488,112 +481,111 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { private MetricsRecordBuilder addGaugesToMetricsRecordBuilder(MetricsRecordBuilder mrb) { return mrb.addGauge(Interns.info(REGION_COUNT, REGION_COUNT_DESC), rsWrap.getNumOnlineRegions()) - .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) - .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) - .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) - .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), - rsWrap.getNumStoreFiles()) - .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) - .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) - .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), - rsWrap.getMaxStoreFileAge()) - .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), - rsWrap.getMinStoreFileAge()) - .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), - rsWrap.getAvgStoreFileAge()) - .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), - rsWrap.getNumReferenceFiles()) - .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) - .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), - rsWrap.getAverageRegionSize()) - .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), - rsWrap.getStoreFileIndexSize()) - .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), - rsWrap.getTotalStaticIndexSize()) - .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), - rsWrap.getTotalStaticBloomSize()) - .addGauge(Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, - NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), rsWrap.getNumMutationsWithoutWAL()) - .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), - rsWrap.getDataInMemoryWithoutWAL()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), - rsWrap.getPercentFileLocal()) - .addGauge(Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, - PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), - rsWrap.getPercentFileLocalSecondaryRegions()) - .addGauge(Interns.info(TOTAL_BYTES_READ, - TOTAL_BYTES_READ_DESC), - rsWrap.getTotalBytesRead()) - .addGauge(Interns.info(LOCAL_BYTES_READ, - LOCAL_BYTES_READ_DESC), - rsWrap.getLocalBytesRead()) 
- .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, - SHORTCIRCUIT_BYTES_READ_DESC), - rsWrap.getShortCircuitBytesRead()) - .addGauge(Interns.info(ZEROCOPY_BYTES_READ, - ZEROCOPY_BYTES_READ_DESC), - rsWrap.getZeroCopyBytesRead()) - .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), - rsWrap.getSplitQueueSize()) - .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), - rsWrap.getCompactionQueueSize()) - .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, - SMALL_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getSmallCompactionQueueSize()) - .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, - LARGE_COMPACTION_QUEUE_LENGTH_DESC), rsWrap.getLargeCompactionQueueSize()) - .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), - rsWrap.getFlushQueueSize()) - .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), - rsWrap.getBlockCacheFreeSize()) - .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), - rsWrap.getBlockCacheCount()) - .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), - rsWrap.getBlockCacheSize()) - .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), - rsWrap.getBlockCacheHitPercent()) - .addGauge(Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, - BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), rsWrap.getBlockCacheHitCachingPercent()) - .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), - rsWrap.getL1CacheHitCount()) - .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), - rsWrap.getL1CacheMissCount()) - .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), - rsWrap.getL1CacheHitRatio()) - .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), - rsWrap.getL1CacheMissRatio()) - .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), - rsWrap.getL2CacheHitCount()) - .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), - rsWrap.getL2CacheMissCount()) - .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), - rsWrap.getL2CacheHitRatio()) - .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), - rsWrap.getL2CacheMissRatio()) - .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), - rsWrap.getMobFileCacheCount()) - .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), - rsWrap.getMobFileCacheHitPercent()) - .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), - rsWrap.getReadRequestsRatePerSecond()) - .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), - rsWrap.getWriteRequestsRatePerSecond()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorHeapAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, - BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), - rsWrap.getByteBuffAllocatorPoolAllocationBytes()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, - BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), - rsWrap.getByteBuffAllocatorHeapAllocRatio()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorTotalBufferCount()) - .addGauge(Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, - BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), - rsWrap.getByteBuffAllocatorUsedBufferCount()) - .addGauge(Interns.info(ACTIVE_SCANNERS, 
ACTIVE_SCANNERS_DESC), - rsWrap.getActiveScanners()); + .addGauge(Interns.info(STORE_COUNT, STORE_COUNT_DESC), rsWrap.getNumStores()) + .addGauge(Interns.info(WALFILE_COUNT, WALFILE_COUNT_DESC), rsWrap.getNumWALFiles()) + .addGauge(Interns.info(WALFILE_SIZE, WALFILE_SIZE_DESC), rsWrap.getWALFileSize()) + .addGauge(Interns.info(STOREFILE_COUNT, STOREFILE_COUNT_DESC), rsWrap.getNumStoreFiles()) + .addGauge(Interns.info(MEMSTORE_SIZE, MEMSTORE_SIZE_DESC), rsWrap.getMemStoreSize()) + .addGauge(Interns.info(STOREFILE_SIZE, STOREFILE_SIZE_DESC), rsWrap.getStoreFileSize()) + .addGauge(Interns.info(MAX_STORE_FILE_AGE, MAX_STORE_FILE_AGE_DESC), + rsWrap.getMaxStoreFileAge()) + .addGauge(Interns.info(MIN_STORE_FILE_AGE, MIN_STORE_FILE_AGE_DESC), + rsWrap.getMinStoreFileAge()) + .addGauge(Interns.info(AVG_STORE_FILE_AGE, AVG_STORE_FILE_AGE_DESC), + rsWrap.getAvgStoreFileAge()) + .addGauge(Interns.info(NUM_REFERENCE_FILES, NUM_REFERENCE_FILES_DESC), + rsWrap.getNumReferenceFiles()) + .addGauge(Interns.info(RS_START_TIME_NAME, RS_START_TIME_DESC), rsWrap.getStartCode()) + .addGauge(Interns.info(AVERAGE_REGION_SIZE, AVERAGE_REGION_SIZE_DESC), + rsWrap.getAverageRegionSize()) + .addGauge(Interns.info(STOREFILE_INDEX_SIZE, STOREFILE_INDEX_SIZE_DESC), + rsWrap.getStoreFileIndexSize()) + .addGauge(Interns.info(STATIC_INDEX_SIZE, STATIC_INDEX_SIZE_DESC), + rsWrap.getTotalStaticIndexSize()) + .addGauge(Interns.info(STATIC_BLOOM_SIZE, STATIC_BLOOM_SIZE_DESC), + rsWrap.getTotalStaticBloomSize()) + .addGauge( + Interns.info(NUMBER_OF_MUTATIONS_WITHOUT_WAL, NUMBER_OF_MUTATIONS_WITHOUT_WAL_DESC), + rsWrap.getNumMutationsWithoutWAL()) + .addGauge(Interns.info(DATA_SIZE_WITHOUT_WAL, DATA_SIZE_WITHOUT_WAL_DESC), + rsWrap.getDataInMemoryWithoutWAL()) + .addGauge(Interns.info(PERCENT_FILES_LOCAL, PERCENT_FILES_LOCAL_DESC), + rsWrap.getPercentFileLocal()) + .addGauge( + Interns.info(PERCENT_FILES_LOCAL_SECONDARY_REGIONS, + PERCENT_FILES_LOCAL_SECONDARY_REGIONS_DESC), + rsWrap.getPercentFileLocalSecondaryRegions()) + .addGauge(Interns.info(TOTAL_BYTES_READ, TOTAL_BYTES_READ_DESC), rsWrap.getTotalBytesRead()) + .addGauge(Interns.info(LOCAL_BYTES_READ, LOCAL_BYTES_READ_DESC), rsWrap.getLocalBytesRead()) + .addGauge(Interns.info(SHORTCIRCUIT_BYTES_READ, SHORTCIRCUIT_BYTES_READ_DESC), + rsWrap.getShortCircuitBytesRead()) + .addGauge(Interns.info(ZEROCOPY_BYTES_READ, ZEROCOPY_BYTES_READ_DESC), + rsWrap.getZeroCopyBytesRead()) + .addGauge(Interns.info(SPLIT_QUEUE_LENGTH, SPLIT_QUEUE_LENGTH_DESC), + rsWrap.getSplitQueueSize()) + .addGauge(Interns.info(COMPACTION_QUEUE_LENGTH, COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getCompactionQueueSize()) + .addGauge(Interns.info(SMALL_COMPACTION_QUEUE_LENGTH, SMALL_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getSmallCompactionQueueSize()) + .addGauge(Interns.info(LARGE_COMPACTION_QUEUE_LENGTH, LARGE_COMPACTION_QUEUE_LENGTH_DESC), + rsWrap.getLargeCompactionQueueSize()) + .addGauge(Interns.info(FLUSH_QUEUE_LENGTH, FLUSH_QUEUE_LENGTH_DESC), + rsWrap.getFlushQueueSize()) + .addGauge(Interns.info(BLOCK_CACHE_FREE_SIZE, BLOCK_CACHE_FREE_DESC), + rsWrap.getBlockCacheFreeSize()) + .addGauge(Interns.info(BLOCK_CACHE_COUNT, BLOCK_CACHE_COUNT_DESC), + rsWrap.getBlockCacheCount()) + .addGauge(Interns.info(BLOCK_CACHE_SIZE, BLOCK_CACHE_SIZE_DESC), rsWrap.getBlockCacheSize()) + .addGauge(Interns.info(BLOCK_CACHE_HIT_PERCENT, BLOCK_CACHE_HIT_PERCENT_DESC), + rsWrap.getBlockCacheHitPercent()) + .addGauge( + Interns.info(BLOCK_CACHE_EXPRESS_HIT_PERCENT, BLOCK_CACHE_EXPRESS_HIT_PERCENT_DESC), + 
rsWrap.getBlockCacheHitCachingPercent()) + .addGauge(Interns.info(L1_CACHE_HIT_COUNT, L1_CACHE_HIT_COUNT_DESC), + rsWrap.getL1CacheHitCount()) + .addGauge(Interns.info(L1_CACHE_MISS_COUNT, L1_CACHE_MISS_COUNT_DESC), + rsWrap.getL1CacheMissCount()) + .addGauge(Interns.info(L1_CACHE_HIT_RATIO, L1_CACHE_HIT_RATIO_DESC), + rsWrap.getL1CacheHitRatio()) + .addGauge(Interns.info(L1_CACHE_MISS_RATIO, L1_CACHE_MISS_RATIO_DESC), + rsWrap.getL1CacheMissRatio()) + .addGauge(Interns.info(L2_CACHE_HIT_COUNT, L2_CACHE_HIT_COUNT_DESC), + rsWrap.getL2CacheHitCount()) + .addGauge(Interns.info(L2_CACHE_MISS_COUNT, L2_CACHE_MISS_COUNT_DESC), + rsWrap.getL2CacheMissCount()) + .addGauge(Interns.info(L2_CACHE_HIT_RATIO, L2_CACHE_HIT_RATIO_DESC), + rsWrap.getL2CacheHitRatio()) + .addGauge(Interns.info(L2_CACHE_MISS_RATIO, L2_CACHE_MISS_RATIO_DESC), + rsWrap.getL2CacheMissRatio()) + .addGauge(Interns.info(MOB_FILE_CACHE_COUNT, MOB_FILE_CACHE_COUNT_DESC), + rsWrap.getMobFileCacheCount()) + .addGauge(Interns.info(MOB_FILE_CACHE_HIT_PERCENT, MOB_FILE_CACHE_HIT_PERCENT_DESC), + rsWrap.getMobFileCacheHitPercent()) + .addGauge(Interns.info(READ_REQUEST_RATE_PER_SECOND, READ_REQUEST_RATE_DESC), + rsWrap.getReadRequestsRatePerSecond()) + .addGauge(Interns.info(WRITE_REQUEST_RATE_PER_SECOND, WRITE_REQUEST_RATE_DESC), + rsWrap.getWriteRequestsRatePerSecond()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorHeapAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES, + BYTE_BUFF_ALLOCATOR_POOL_ALLOCATION_BYTES_DESC), + rsWrap.getByteBuffAllocatorPoolAllocationBytes()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO, + BYTE_BUFF_ALLOCATOR_HEAP_ALLOCATION_RATIO_DESC), + rsWrap.getByteBuffAllocatorHeapAllocRatio()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_TOTAL_BUFFER_COUNT_DESC), + rsWrap.getByteBuffAllocatorTotalBufferCount()) + .addGauge( + Interns.info(BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT, + BYTE_BUFF_ALLOCATOR_USED_BUFFER_COUNT_DESC), + rsWrap.getByteBuffAllocatorUsedBufferCount()) + .addGauge(Interns.info(ACTIVE_SCANNERS, ACTIVE_SCANNERS_DESC), rsWrap.getActiveScanners()); } @Override diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java index 09d600f26ffe..ae500d60bac8 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DynamicMetricsRegistry; @@ -72,14 +70,14 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource { private final int hashCode; public MetricsRegionSourceImpl(MetricsRegionWrapper regionWrapper, - MetricsRegionAggregateSourceImpl aggregate) { + MetricsRegionAggregateSourceImpl aggregate) { this.regionWrapper = regionWrapper; agg = aggregate; hashCode = regionWrapper.getRegionHashCode(); agg.register(this); - LOG.debug("Creating new MetricsRegionSourceImpl for table " + - regionWrapper.getTableName() + " " + regionWrapper.getRegionName()); + LOG.debug("Creating new MetricsRegionSourceImpl for table " + regionWrapper.getTableName() + " " + + regionWrapper.getRegionName()); registry = agg.getMetricsRegistry(); @@ -204,106 +202,84 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) { return; } + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, + MetricsRegionServerSource.STORE_COUNT_DESC), this.regionWrapper.getNumStores()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + MetricsRegionServerSource.STOREFILE_COUNT_DESC), this.regionWrapper.getNumStoreFiles()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, + MetricsRegionServerSource.STORE_REF_COUNT), this.regionWrapper.getStoreRefCount()); + mrb.addGauge( + Interns.info( + regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), + this.regionWrapper.getMaxCompactedStoreFileRefCount()); + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + MetricsRegionServerSource.MEMSTORE_SIZE_DESC), this.regionWrapper.getMemStoreSize()); mrb.addGauge( - Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_COUNT, - MetricsRegionServerSource.STORE_COUNT_DESC), - this.regionWrapper.getNumStores()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, - MetricsRegionServerSource.STOREFILE_COUNT_DESC), - this.regionWrapper.getNumStoreFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STORE_REF_COUNT, - MetricsRegionServerSource.STORE_REF_COUNT), - this.regionWrapper.getStoreRefCount()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT, - MetricsRegionServerSource.MAX_COMPACTED_STORE_FILE_REF_COUNT), - this.regionWrapper.getMaxCompactedStoreFileRefCount() - ); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, - MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - this.regionWrapper.getMemStoreSize()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, - MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), + Interns.info(regionNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, + MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), this.regionWrapper.getMaxStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, - MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + 
MetricsRegionServerSource.MIN_STORE_FILE_AGE, + MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), this.regionWrapper.getMinStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, - MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), this.regionWrapper.getAvgStoreFileAge()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, - MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), + mrb.addGauge( + Interns.info(regionNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), this.regionWrapper.getNumReferenceFiles()); - mrb.addGauge(Interns.info( - regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, - MetricsRegionServerSource.STOREFILE_SIZE_DESC), - this.regionWrapper.getStoreFileSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, - MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), - this.regionWrapper.getNumCompactionsCompleted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, + mrb.addGauge(Interns.info(regionNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + MetricsRegionServerSource.STOREFILE_SIZE_DESC), this.regionWrapper.getStoreFileSize()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_COMPLETED_COUNT, + MetricsRegionSource.COMPACTIONS_COMPLETED_DESC), + this.regionWrapper.getNumCompactionsCompleted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_FAILED_COUNT, MetricsRegionSource.COMPACTIONS_FAILED_DESC), - this.regionWrapper.getNumCompactionsFailed()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, - MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), - this.regionWrapper.getLastMajorCompactionAge()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, - MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), - this.regionWrapper.getNumBytesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, - MetricsRegionSource.NUM_FILES_COMPACTED_DESC), - this.regionWrapper.getNumFilesCompacted()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, - MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - this.regionWrapper.getReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - this.regionWrapper.getFilteredReadRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, - MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - this.regionWrapper.getWriteRequestCount()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.REPLICA_ID, - MetricsRegionSource.REPLICA_ID_DESC), - this.regionWrapper.getReplicaId()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, - MetricsRegionSource.COMPACTIONS_QUEUED_DESC), - this.regionWrapper.getNumCompactionsQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, 
- MetricsRegionSource.FLUSHES_QUEUED_DESC), - this.regionWrapper.getNumFlushesQueued()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, - MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), - this.regionWrapper.getMaxCompactionQueueSize()); - mrb.addCounter(Interns.info( - regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, - MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), - this.regionWrapper.getMaxFlushQueueSize()); + this.regionWrapper.getNumCompactionsFailed()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.LAST_MAJOR_COMPACTION_AGE, + MetricsRegionSource.LAST_MAJOR_COMPACTION_DESC), + this.regionWrapper.getLastMajorCompactionAge()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_BYTES_COMPACTED_COUNT, + MetricsRegionSource.NUM_BYTES_COMPACTED_DESC), this.regionWrapper.getNumBytesCompacted()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.NUM_FILES_COMPACTED_COUNT, + MetricsRegionSource.NUM_FILES_COMPACTED_DESC), this.regionWrapper.getNumFilesCompacted()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), + this.regionWrapper.getReadRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + this.regionWrapper.getFilteredReadRequestCount()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), + this.regionWrapper.getWriteRequestCount()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.REPLICA_ID, + MetricsRegionSource.REPLICA_ID_DESC), this.regionWrapper.getReplicaId()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.COMPACTIONS_QUEUED_COUNT, + MetricsRegionSource.COMPACTIONS_QUEUED_DESC), + this.regionWrapper.getNumCompactionsQueued()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.FLUSHES_QUEUED_COUNT, + MetricsRegionSource.FLUSHES_QUEUED_DESC), this.regionWrapper.getNumFlushesQueued()); + mrb.addCounter( + Interns.info(regionNamePrefix + MetricsRegionSource.MAX_COMPACTION_QUEUE_SIZE, + MetricsRegionSource.MAX_COMPACTION_QUEUE_DESC), + this.regionWrapper.getMaxCompactionQueueSize()); + mrb.addCounter(Interns.info(regionNamePrefix + MetricsRegionSource.MAX_FLUSH_QUEUE_SIZE, + MetricsRegionSource.MAX_FLUSH_QUEUE_DESC), this.regionWrapper.getMaxFlushQueueSize()); addCounter(mrb, this.regionWrapper.getMemstoreOnlyRowReadsCount(), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addCounter(mrb, this.regionWrapper.getMixedRowReadsCount(), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } @@ -326,7 +302,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsRegionSourceImpl && compareTo((MetricsRegionSourceImpl) obj) == 0); + return obj == this || (obj instanceof MetricsRegionSourceImpl + && compareTo((MetricsRegionSourceImpl) obj) == 0); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java 
b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java index 0b13e5c8dfed..06a5bc82284f 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.regionserver; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.hbase.metrics.Interns; @@ -31,7 +29,7 @@ @InterfaceAudience.Private public class MetricsTableAggregateSourceImpl extends BaseSourceImpl - implements MetricsTableAggregateSource { + implements MetricsTableAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsTableAggregateSourceImpl.class); private ConcurrentHashMap tableSources = new ConcurrentHashMap<>(); @@ -40,10 +38,8 @@ public MetricsTableAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsTableAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsTableAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } @@ -73,7 +69,7 @@ public MetricsTableSource getOrCreateTableSource(String table, return source; } MetricsTableSource newSource = CompatibilitySingletonFactory - .getInstance(MetricsRegionServerSourceFactory.class).createTable(table, wrapper); + .getInstance(MetricsRegionServerSourceFactory.class).createTable(table, wrapper); return tableSources.computeIfAbsent(table, k -> { // register the new metrics now newSource.registerMetrics(); @@ -82,12 +78,11 @@ public MetricsTableSource getOrCreateTableSource(String table, } /** - * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all - * expectations of java programmers. Instead of returning anything Hadoop metrics expects + * Yes this is a get function that doesn't return anything. Thanks Hadoop for breaking all + * expectations of java programmers. Instead of returning anything Hadoop metrics expects * getMetrics to push the metrics into the collector. - * * @param collector the collector - * @param all get all the metrics regardless of when they last changed. + * @param all get all the metrics regardless of when they last changed. 
*/ @Override public void getMetrics(MetricsCollector collector, boolean all) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java index dd143d4c6f5d..0db5dd510628 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableLatenciesImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -32,7 +33,7 @@ @InterfaceAudience.Private public class MetricsTableLatenciesImpl extends BaseSourceImpl implements MetricsTableLatencies { - private final HashMap histogramsByTable = new HashMap<>(); + private final HashMap histogramsByTable = new HashMap<>(); public static class TableHistograms { final MetricHistogram getTimeHisto; @@ -50,22 +51,19 @@ public static class TableHistograms { TableHistograms(DynamicMetricsRegistry registry, TableName tn) { getTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, GET_TIME)); - incrementTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, INCREMENT_TIME)); + incrementTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, INCREMENT_TIME)); appendTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, APPEND_TIME)); putTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_TIME)); putBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, PUT_BATCH_TIME)); deleteTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_TIME)); - deleteBatchTimeHisto = registry.newTimeHistogram( - qualifyMetricsName(tn, DELETE_BATCH_TIME)); + deleteBatchTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, DELETE_BATCH_TIME)); scanTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, SCAN_TIME)); scanSizeHisto = registry.newSizeHistogram(qualifyMetricsName(tn, SCAN_SIZE)); checkAndDeleteTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); - checkAndPutTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); + registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_DELETE_TIME)); + checkAndPutTimeHisto = registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_PUT_TIME)); checkAndMutateTimeHisto = - registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); + 
registry.newTimeHistogram(qualifyMetricsName(tn, CHECK_AND_MUTATE_TIME)); } public void updatePut(long time) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java index 6b1d323dc19a..f93e425baadd 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableQueryMeterImpl.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to you under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -18,15 +19,14 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Meter; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in - * a RegionServer. + * Implementation of {@link MetricsTableQueryMeter} to track query per second for each table in a + * RegionServer. 
*/ @InterfaceAudience.Private public class MetricsTableQueryMeterImpl implements MetricsTableQueryMeter { @@ -42,10 +42,10 @@ private static class TableMeters { final Meter tableWriteQueryMeter; TableMeters(MetricRegistry metricRegistry, TableName tableName) { - this.tableReadQueryMeter = metricRegistry.meter(qualifyMetricsName(tableName, - TABLE_READ_QUERY_PER_SECOND)); + this.tableReadQueryMeter = + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_READ_QUERY_PER_SECOND)); this.tableWriteQueryMeter = - metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); + metricRegistry.meter(qualifyMetricsName(tableName, TABLE_WRITE_QUERY_PER_SECOND)); } public void updateTableReadQueryMeter(long count) { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java index d82cc535bf7c..c94718fd6878 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsTableSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,7 +64,6 @@ import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.metrics.Interns; import org.apache.hadoop.metrics2.MetricHistogram; @@ -126,19 +125,18 @@ public class MetricsTableSourceImpl implements MetricsTableSource { private MutableFastCounter majorCompactedInputBytes; private MutableFastCounter majorCompactedOutputBytes; - public MetricsTableSourceImpl(String tblName, - MetricsTableAggregateSourceImpl aggregate, MetricsTableWrapperAggregate tblWrapperAgg) { + public MetricsTableSourceImpl(String tblName, MetricsTableAggregateSourceImpl aggregate, + MetricsTableWrapperAggregate tblWrapperAgg) { LOG.debug("Creating new MetricsTableSourceImpl for table '{}'", tblName); this.tableName = TableName.valueOf(tblName); this.agg = aggregate; this.tableWrapperAgg = tblWrapperAgg; this.registry = agg.getMetricsRegistry(); - this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + - "_table_" + this.tableName.getQualifierAsString(); + this.tableNamePrefixPart1 = "Namespace_" + this.tableName.getNamespaceAsString() + "_table_" + + this.tableName.getQualifierAsString(); this.tableNamePrefixPart2 = "_metric_"; - this.tableNamePrefix = tableNamePrefixPart1 + - tableNamePrefixPart2; + this.tableNamePrefix = tableNamePrefixPart1 + tableNamePrefixPart2; this.hashCode = this.tableName.hashCode(); } @@ -238,6 +236,7 @@ public void close() { tableWrapperAgg = null; } } + @Override public MetricsTableAggregateSource getAggregateSource() { return agg; @@ -272,58 +271,71 @@ void snapshot(MetricsRecordBuilder mrb, boolean ignored) { } if (this.tableWrapperAgg != null) { - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT, MetricsRegionServerSource.READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); + tableWrapperAgg.getReadRequestCount(tableName.getNameAsString())); mrb.addCounter( - 
Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, - MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), - tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, + Interns.info(tableNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT, + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC), + tableWrapperAgg.getFilteredReadRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT, MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC), - tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); - mrb.addCounter(Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, + tableWrapperAgg.getWriteRequestCount(tableName.getNameAsString())); + mrb.addCounter( + Interns.info(tableNamePrefix + MetricsRegionServerSource.TOTAL_REQUEST_COUNT, MetricsRegionServerSource.TOTAL_REQUEST_COUNT_DESC), - tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, + tableWrapperAgg.getTotalRequestsCount(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MEMSTORE_SIZE, MetricsRegionServerSource.MEMSTORE_SIZE_DESC), - tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, + tableWrapperAgg.getMemStoreSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_COUNT, MetricsRegionServerSource.STOREFILE_COUNT_DESC), - tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, + tableWrapperAgg.getNumStoreFiles(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STOREFILE_SIZE, MetricsRegionServerSource.STOREFILE_SIZE_DESC), - tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, - MetricsTableSource.TABLE_SIZE_DESC), + tableWrapperAgg.getStoreFileSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsTableSource.TABLE_SIZE, + MetricsTableSource.TABLE_SIZE_DESC), tableWrapperAgg.getTableSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVERAGE_REGION_SIZE, MetricsRegionServerSource.AVERAGE_REGION_SIZE_DESC), - tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, + tableWrapperAgg.getAvgRegionSize(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.REGION_COUNT, MetricsRegionServerSource.REGION_COUNT_DESC), - tableWrapperAgg.getNumRegions(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, + tableWrapperAgg.getNumRegions(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.STORE_COUNT, MetricsRegionServerSource.STORE_COUNT_DESC), - 
tableWrapperAgg.getNumStores(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, + tableWrapperAgg.getNumStores(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MAX_STORE_FILE_AGE, MetricsRegionServerSource.MAX_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, + tableWrapperAgg.getMaxStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.MIN_STORE_FILE_AGE, MetricsRegionServerSource.MIN_STORE_FILE_AGE_DESC), - tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, + tableWrapperAgg.getMinStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.AVG_STORE_FILE_AGE, MetricsRegionServerSource.AVG_STORE_FILE_AGE_DESC), - tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); - mrb.addGauge(Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, + tableWrapperAgg.getAvgStoreFileAge(tableName.getNameAsString())); + mrb.addGauge( + Interns.info(tableNamePrefix + MetricsRegionServerSource.NUM_REFERENCE_FILES, MetricsRegionServerSource.NUM_REFERENCE_FILES_DESC), - tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); + tableWrapperAgg.getNumReferenceFiles(tableName.getNameAsString())); addGauge(mrb, tableWrapperAgg.getMemstoreOnlyRowReadsCount(tableName.getNameAsString()), MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE, MetricsRegionSource.ROW_READS_ONLY_ON_MEMSTORE_DESC); addGauge(mrb, tableWrapperAgg.getMixedRowReadsCount(tableName.getNameAsString()), - MetricsRegionSource.MIXED_ROW_READS, - MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); + MetricsRegionSource.MIXED_ROW_READS, MetricsRegionSource.MIXED_ROW_READS_ON_STORE_DESC); } } } @@ -334,8 +346,8 @@ private void addGauge(MetricsRecordBuilder mrb, Map metricMap, Str for (Entry entry : metricMap.entrySet()) { // append 'store' and its name to the metric mrb.addGauge(Interns.info(this.tableNamePrefixPart1 + _COLUMNFAMILY - + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] - + this.tableNamePrefixPart2 + metricName, + + entry.getKey().split(MetricsTableWrapperAggregate.HASH)[1] + this.tableNamePrefixPart2 + + metricName, metricDesc), entry.getValue()); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java index 28726c4ee1f1..85ace54b0826 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserAggregateSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; - import org.apache.hadoop.hbase.metrics.BaseSourceImpl; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -32,7 +30,7 @@ @InterfaceAudience.Private public class MetricsUserAggregateSourceImpl extends BaseSourceImpl - implements MetricsUserAggregateSource { + implements MetricsUserAggregateSource { private static final Logger LOG = LoggerFactory.getLogger(MetricsUserAggregateSourceImpl.class); @@ -43,10 +41,8 @@ public MetricsUserAggregateSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsUserAggregateSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsUserAggregateSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java index ef0eb7bf4620..871a3e42550c 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.Collections; @@ -23,7 +22,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricHistogram; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -74,32 +72,39 @@ public ClientMetricsImpl(String hostName) { this.hostName = hostName; } - @Override public void incrementReadRequest() { + @Override + public void incrementReadRequest() { readRequestsCount.increment(); } - @Override public void incrementWriteRequest() { + @Override + public void incrementWriteRequest() { writeRequestsCount.increment(); } - @Override public String getHostName() { + @Override + public String getHostName() { return hostName; } - @Override public long getReadRequestsCount() { + @Override + public long getReadRequestsCount() { return readRequestsCount.sum(); } - @Override public long getWriteRequestsCount() { + @Override + public long getWriteRequestsCount() { return writeRequestsCount.sum(); } - @Override public void incrementFilteredReadRequests() { + @Override + public void incrementFilteredReadRequests() { filteredRequestsCount.increment(); } - @Override public long getFilteredReadRequests() { + @Override + public long getFilteredReadRequests() { return filteredRequestsCount.sum(); } } @@ -191,8 +196,8 @@ public int hashCode() { @Override public boolean equals(Object obj) { - return obj == this || - (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); + return obj == this + || (obj instanceof MetricsUserSourceImpl && compareTo((MetricsUserSourceImpl) obj) == 0); } void snapshot(MetricsRecordBuilder mrb, boolean ignored) { @@ -252,16 +257,19 @@ public void updateScanTime(long t) { scanTimeHisto.add(t); } - @Override public void getMetrics(MetricsCollector metricsCollector, boolean all) { + @Override + public void getMetrics(MetricsCollector metricsCollector, boolean all) { MetricsRecordBuilder mrb = metricsCollector.addRecord(this.userNamePrefix); registry.snapshot(mrb, all); } - @Override public Map getClientMetrics() { + @Override + public Map getClientMetrics() { return Collections.unmodifiableMap(clientMetricsMap); } - @Override public ClientMetrics getOrCreateMetricsClient(String client) { + @Override + public ClientMetrics getOrCreateMetricsClient(String client) { ClientMetrics source = clientMetricsMap.get(client); if (source != null) { return source; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java index 4f71681113c5..501e02c7f156 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/wal/MetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.regionserver.wal; import java.util.concurrent.ConcurrentHashMap; @@ -27,9 +26,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsWAL into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsWAL into the metrics subsystem. Implements BaseSource + * through BaseSourceImpl, following the pattern. * @see org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource */ @InterfaceAudience.Private @@ -55,13 +53,11 @@ public MetricsWALSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - public MetricsWALSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsWALSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. + // Create and store the metrics that will be used. appendTimeHisto = this.getMetricsRegistry().newTimeHistogram(APPEND_TIME, APPEND_TIME_DESC); appendSizeHisto = this.getMetricsRegistry().newSizeHistogram(APPEND_SIZE, APPEND_SIZE_DESC); appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L); @@ -70,17 +66,17 @@ public MetricsWALSourceImpl(String metricsName, syncTimeHisto = this.getMetricsRegistry().newTimeHistogram(SYNC_TIME, SYNC_TIME_DESC); logRollRequested = this.getMetricsRegistry().newCounter(ROLL_REQUESTED, ROLL_REQUESTED_DESC, 0L); - errorRollRequested = this.getMetricsRegistry() - .newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); - lowReplicationRollRequested = this.getMetricsRegistry() - .newCounter(LOW_REPLICA_ROLL_REQUESTED, LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); - slowSyncRollRequested = this.getMetricsRegistry() - .newCounter(SLOW_SYNC_ROLL_REQUESTED, SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); - sizeRollRequested = this.getMetricsRegistry() - .newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); + errorRollRequested = + this.getMetricsRegistry().newCounter(ERROR_ROLL_REQUESTED, ERROR_ROLL_REQUESTED_DESC, 0L); + lowReplicationRollRequested = this.getMetricsRegistry().newCounter(LOW_REPLICA_ROLL_REQUESTED, + LOW_REPLICA_ROLL_REQUESTED_DESC, 0L); + slowSyncRollRequested = this.getMetricsRegistry().newCounter(SLOW_SYNC_ROLL_REQUESTED, + SLOW_SYNC_ROLL_REQUESTED_DESC, 0L); + sizeRollRequested = + this.getMetricsRegistry().newCounter(SIZE_ROLL_REQUESTED, SIZE_ROLL_REQUESTED_DESC, 0L); writtenBytes = this.getMetricsRegistry().newCounter(WRITTEN_BYTES, WRITTEN_BYTES_DESC, 0L); - successfulLogRolls = this.getMetricsRegistry() - .newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); + successfulLogRolls = + this.getMetricsRegistry().newCounter(SUCCESSFUL_LOG_ROLLS, SUCCESSFUL_LOG_ROLLS_DESC, 0L); perTableAppendCount = new ConcurrentHashMap<>(); perTableAppendSize = new ConcurrentHashMap<>(); } @@ -93,8 +89,8 @@ public void incrementAppendSize(TableName tableName, long size) { // Ideally putIfAbsent is atomic and we don't need a branch check but we still do it to avoid // expensive string construction for every append. 
String metricsKey = String.format("%s.%s", tableName, APPEND_SIZE); - perTableAppendSize.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); + perTableAppendSize.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_SIZE_DESC, 0L)); tableAppendSizeCounter = perTableAppendSize.get(tableName); } tableAppendSizeCounter.incr(size); @@ -111,8 +107,8 @@ public void incrementAppendCount(TableName tableName) { MutableFastCounter tableAppendCounter = perTableAppendCount.get(tableName); if (tableAppendCounter == null) { String metricsKey = String.format("%s.%s", tableName, APPEND_COUNT); - perTableAppendCount.putIfAbsent( - tableName, getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); + perTableAppendCount.putIfAbsent(tableName, + getMetricsRegistry().newCounter(metricsKey, APPEND_COUNT_DESC, 0L)); tableAppendCounter = perTableAppendCount.get(tableName); } tableAppendCounter.incr(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java index 5eb5deb03f6c..3ec9faf6b985 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationGlobalSourceSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -26,7 +25,7 @@ @InterfaceAudience.Private public class MetricsReplicationGlobalSourceSourceImpl - implements MetricsReplicationGlobalSourceSource { + implements MetricsReplicationGlobalSourceSource { private static final String KEY_PREFIX = "source."; private final MetricsReplicationSourceImpl rms; @@ -62,7 +61,7 @@ public MetricsReplicationGlobalSourceSourceImpl(MetricsReplicationSourceImpl rms this.rms = rms; ageOfLastShippedOpHist = - rms.getMetricsRegistry().newTimeHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP); + rms.getMetricsRegistry().newTimeHistogram(SOURCE_AGE_OF_LAST_SHIPPED_OP); sizeOfLogQueueGauge = rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_LOG_QUEUE, 0L); @@ -85,53 +84,60 @@ public MetricsReplicationGlobalSourceSourceImpl(MetricsReplicationSourceImpl rms sizeOfHFileRefsQueueGauge = rms.getMetricsRegistry().getGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L); - unknownFileLengthForClosedWAL = rms.getMetricsRegistry() - .getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); + unknownFileLengthForClosedWAL = + rms.getMetricsRegistry().getCounter(SOURCE_CLOSED_LOGS_WITH_UNKNOWN_LENGTH, 0L); uncleanlyClosedWAL = rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_LOGS, 0L); - uncleanlyClosedSkippedBytes = rms.getMetricsRegistry() - .getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); + uncleanlyClosedSkippedBytes = + rms.getMetricsRegistry().getCounter(SOURCE_UNCLEANLY_CLOSED_IGNORED_IN_BYTES, 0L); restartWALReading = rms.getMetricsRegistry().getCounter(SOURCE_RESTARTED_LOG_READING, 0L); repeatedFileBytes = rms.getMetricsRegistry().getCounter(SOURCE_REPEATED_LOG_FILE_BYTES, 0L); completedWAL = rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_LOGS, 0L); - completedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); - failedRecoveryQueue = rms.getMetricsRegistry() - .getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); + completedRecoveryQueue = + rms.getMetricsRegistry().getCounter(SOURCE_COMPLETED_RECOVERY_QUEUES, 0L); + failedRecoveryQueue = rms.getMetricsRegistry().getCounter(SOURCE_FAILED_RECOVERY_QUEUES, 0L); - walReaderBufferUsageBytes = rms.getMetricsRegistry() - .getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); + walReaderBufferUsageBytes = + rms.getMetricsRegistry().getGauge(SOURCE_WAL_READER_EDITS_BUFFER, 0L); sourceInitializing = rms.getMetricsRegistry().getGaugeInt(SOURCE_INITIALIZING, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { 
shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); // obtained value maybe smaller than 1024. We should make sure that KB count // eventually picks up even from multiple smaller updates. @@ -141,7 +147,7 @@ public MetricsReplicationGlobalSourceSourceImpl(MetricsReplicationSourceImpl rms static void incrementKBsCounter(MutableFastCounter bytesCounter, MutableFastCounter kbsCounter) { // Following code should be thread-safe. long delta = 0; - while(true) { + while (true) { long bytes = bytesCounter.value(); delta = (bytes / 1024) - kbsCounter.value(); if (delta > 0) { @@ -151,11 +157,14 @@ static void incrementKBsCounter(MutableFastCounter bytesCounter, MutableFastCoun } } } - @Override public void incrLogReadInBytes(long size) { + + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { } @Override @@ -163,7 +172,8 @@ public long getLastShippedAge() { return ageOfLastShippedOpHist.getMax(); } - @Override public void incrHFilesShipped(long hfiles) { + @Override + public void incrHFilesShipped(long hfiles) { shippedHFilesCounter.incr(hfiles); } @@ -179,13 +189,14 @@ public void decrSizeOfHFileRefsQueue(long size) { @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override public void incrUnknownFileLengthForClosedWAL() { unknownFileLengthForClosedWAL.incr(1L); } + @Override public void incrUncleanlyClosedWALs() { uncleanlyClosedWAL.incr(1L); @@ -200,22 +211,27 @@ public long getUncleanlyClosedWALs() { public void incrBytesSkippedInUncleanlyClosedWALs(final long bytes) { uncleanlyClosedSkippedBytes.incr(bytes); } + @Override public void incrRestartedWALReading() { restartWALReading.incr(1L); } + @Override public void incrRepeatedFileBytes(final long bytes) { repeatedFileBytes.incr(bytes); } + @Override public void incrCompletedWAL() { completedWAL.incr(1L); } + @Override public void incrCompletedRecoveryQueue() { completedRecoveryQueue.incr(1L); } + @Override public void incrFailedRecoveryQueue() { failedRecoveryQueue.incr(1L); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java index ce45af5ccec7..84a7458a257c 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSinkSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.metrics2.lib.MutableFastCounter; @@ -37,15 +36,18 @@ public MetricsReplicationSinkSourceImpl(MetricsReplicationSourceImpl rms) { hfilesCounter = rms.getMetricsRegistry().getCounter(SINK_APPLIED_HFILES, 0L); } - @Override public void setLastAppliedOpAge(long age) { + @Override + public void setLastAppliedOpAge(long age) { ageHist.add(age); } - @Override public void incrAppliedBatches(long batches) { + @Override + public void incrAppliedBatches(long batches) { batchesCounter.incr(batches); } - @Override public void incrAppliedOps(long batchsize) { + @Override + public void incrAppliedOps(long batchsize) { opsCounter.incr(batchsize); } @@ -59,7 +61,8 @@ public void incrAppliedHFiles(long hfiles) { hfilesCounter.incr(hfiles); } - @Override public long getSinkAppliedOps() { + @Override + public long getSinkAppliedOps() { return opsCounter.value(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java index c0cd1c73e0c1..5b223b423927 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceFactoryImpl.java @@ -24,22 +24,27 @@ public class MetricsReplicationSourceFactoryImpl implements MetricsReplicationSo private static enum SourceHolder { INSTANCE; + final MetricsReplicationSourceImpl source = new MetricsReplicationSourceImpl(); } - @Override public MetricsReplicationSinkSource getSink() { + @Override + public MetricsReplicationSinkSource getSink() { return new MetricsReplicationSinkSourceImpl(SourceHolder.INSTANCE.source); } - @Override public MetricsReplicationSourceSource getSource(String id) { + @Override + public MetricsReplicationSourceSource getSource(String id) { return new MetricsReplicationSourceSourceImpl(SourceHolder.INSTANCE.source, id); } - @Override public MetricsReplicationTableSource getTableSource(String tableName) { + @Override + public MetricsReplicationTableSource getTableSource(String tableName) { return new MetricsReplicationTableSourceImpl(SourceHolder.INSTANCE.source, tableName); } - @Override public MetricsReplicationGlobalSourceSource getGlobalSource() { + @Override + public MetricsReplicationGlobalSourceSource getGlobalSource() { return new MetricsReplicationGlobalSourceSourceImpl(SourceHolder.INSTANCE.source); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java index 02045f8bbd13..f841a97929e3 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication.regionserver; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -23,23 +22,18 @@ /** * Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and - * counters. - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * counters. Implements BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsReplicationSourceImpl extends BaseSourceImpl implements - MetricsReplicationSource { - +public class MetricsReplicationSourceImpl extends BaseSourceImpl + implements MetricsReplicationSource { public MetricsReplicationSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT); } - MetricsReplicationSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + MetricsReplicationSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java index 284991201191..4651ea7041c9 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationSourceSourceImpl.java @@ -143,45 +143,55 @@ public MetricsReplicationSourceSourceImpl(MetricsReplicationSourceImpl rms, Stri sourceInitializing = rms.getMetricsRegistry().getGaugeInt(sourceInitializingKey, 0); } - @Override public void setLastShippedAge(long age) { + @Override + public void setLastShippedAge(long age) { ageOfLastShippedOpHist.add(age); } - @Override public void incrSizeOfLogQueue(int size) { + @Override + public void incrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.incr(size); } - @Override public void decrSizeOfLogQueue(int size) { + @Override + public void decrSizeOfLogQueue(int size) { sizeOfLogQueueGauge.decr(size); } - @Override public void incrLogReadInEdits(long size) { + @Override + public void incrLogReadInEdits(long size) { logReadInEditsCounter.incr(size); } - @Override public void incrLogEditsFiltered(long size) { + @Override + public void incrLogEditsFiltered(long size) { walEditsFilteredCounter.incr(size); } - @Override public void incrBatchesShipped(int batches) { + @Override + public void incrBatchesShipped(int batches) { shippedBatchesCounter.incr(batches); } - @Override public void incrOpsShipped(long ops) { + @Override + public void incrOpsShipped(long ops) { shippedOpsCounter.incr(ops); } - @Override public void incrShippedBytes(long size) { + @Override + public void incrShippedBytes(long size) { shippedBytesCounter.incr(size); - MetricsReplicationGlobalSourceSourceImpl - .incrementKBsCounter(shippedBytesCounter, shippedKBsCounter); + MetricsReplicationGlobalSourceSourceImpl.incrementKBsCounter(shippedBytesCounter, + shippedKBsCounter); } - @Override public void incrLogReadInBytes(long size) { + @Override + public void incrLogReadInBytes(long size) { logReadInBytesCounter.incr(size); } - @Override public void clear() { + @Override + public void clear() { rms.removeMetric(ageOfLastShippedOpKey); 
rms.removeMetric(sizeOfLogQueueKey); @@ -232,7 +242,7 @@ public void decrSizeOfHFileRefsQueue(long size) { @Override public int getSizeOfLogQueue() { - return (int)sizeOfLogQueueGauge.value(); + return (int) sizeOfLogQueueGauge.value(); } @Override @@ -276,13 +286,16 @@ public void incrCompletedRecoveryQueue() { } @Override - public void incrFailedRecoveryQueue() {/*no op*/} + public void incrFailedRecoveryQueue() { + /* no op */} - @Override public void setOldestWalAge(long age) { + @Override + public void setOldestWalAge(long age) { oldestWalAge.set(age); } - @Override public long getOldestWalAge() { + @Override + public long getOldestWalAge() { return oldestWalAge.value(); } @@ -296,7 +309,8 @@ public int getSourceInitializing() { return sourceInitializing.value(); } - @Override public void decrSourceInitializing() { + @Override + public void decrSourceInitializing() { sourceInitializing.decr(1); } @@ -355,15 +369,18 @@ public String getMetricsName() { return rms.getMetricsName(); } - @Override public long getWALEditsRead() { + @Override + public long getWALEditsRead() { return this.logReadInEditsCounter.value(); } - @Override public long getShippedOps() { + @Override + public long getShippedOps() { return this.shippedOpsCounter.value(); } - @Override public long getEditsFiltered() { + @Override + public long getEditsFiltered() { return this.walEditsFilteredCounter.value(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java index 9ca0cd1a94ef..244298faff66 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/replication/regionserver/MetricsReplicationTableSourceImpl.java @@ -22,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * This is the metric source for table level replication metrics. - * We can easy monitor some useful table level replication metrics such as - * ageOfLastShippedOp and shippedBytes + * This is the metric source for table level replication metrics. We can easy monitor some useful + * table level replication metrics such as ageOfLastShippedOp and shippedBytes */ @InterfaceAudience.Private public class MetricsReplicationTableSourceImpl implements MetricsReplicationTableSource { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java index 3474265ee26c..45df51579762 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/rest/MetricsRESTSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ /** * Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to - * the hadoop metrics2 subsystem. 
- * - * Implements BaseSource through BaseSourceImpl, following the pattern + * the hadoop metrics2 subsystem. Implements BaseSource through BaseSourceImpl, following the + * pattern */ @InterfaceAudience.Private public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource { @@ -55,17 +53,15 @@ public MetricsRESTSourceImpl() { this(METRICS_NAME, METRICS_DESCRIPTION, CONTEXT, JMX_CONTEXT); } - public MetricsRESTSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsRESTSourceImpl(String metricsName, String metricsDescription, String metricsContext, + String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java index 3ce2d5d1fdc1..7eb9adfb89c1 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceFactoryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.yetus.audience.InterfaceAudience; @@ -32,6 +31,7 @@ public class MetricsThriftServerSourceFactoryImpl implements MetricsThriftServer */ private enum FactoryStorage { INSTANCE; + MetricsThriftServerSourceImpl thriftOne; MetricsThriftServerSourceImpl thriftTwo; } @@ -40,9 +40,7 @@ private enum FactoryStorage { public MetricsThriftServerSource createThriftOneSource() { if (FactoryStorage.INSTANCE.thriftOne == null) { FactoryStorage.INSTANCE.thriftOne = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_ONE_METRICS_CONTEXT, - THRIFT_ONE_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_ONE_METRICS_CONTEXT, THRIFT_ONE_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftOne; } @@ -51,9 +49,7 @@ public MetricsThriftServerSource createThriftOneSource() { public MetricsThriftServerSource createThriftTwoSource() { if (FactoryStorage.INSTANCE.thriftTwo == null) { FactoryStorage.INSTANCE.thriftTwo = new MetricsThriftServerSourceImpl(METRICS_NAME, - METRICS_DESCRIPTION, - THRIFT_TWO_METRICS_CONTEXT, - THRIFT_TWO_JMX_CONTEXT); + METRICS_DESCRIPTION, THRIFT_TWO_METRICS_CONTEXT, THRIFT_TWO_JMX_CONTEXT); } return FactoryStorage.INSTANCE.thriftTwo; } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java index 4ed974c95dce..81a54d31767b 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/thrift/MetricsThriftServerSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.thrift; import org.apache.hadoop.hbase.metrics.ExceptionTrackingSourceImpl; @@ -26,13 +25,12 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} - * - * Implements BaseSource through BaseSourceImpl, following the pattern + * Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource} Implements + * BaseSource through BaseSourceImpl, following the pattern */ @InterfaceAudience.Private -public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl implements - MetricsThriftServerSource { +public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl + implements MetricsThriftServerSource { private MetricHistogram batchGetStat; private MetricHistogram batchMutateStat; @@ -51,17 +49,15 @@ public class MetricsThriftServerSourceImpl extends ExceptionTrackingSourceImpl i private final MetricHistogram pausesWithGc; private final MetricHistogram pausesWithoutGc; - public MetricsThriftServerSourceImpl(String metricsName, - String metricsDescription, - String metricsContext, - String metricsJmxContext) { + public MetricsThriftServerSourceImpl(String metricsName, String metricsDescription, + String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); // pause monitor metrics - infoPauseThresholdExceeded = getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, - INFO_THRESHOLD_COUNT_DESC, 0L); - warnPauseThresholdExceeded = getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, - WARN_THRESHOLD_COUNT_DESC, 0L); + infoPauseThresholdExceeded = + getMetricsRegistry().newCounter(INFO_THRESHOLD_COUNT_KEY, INFO_THRESHOLD_COUNT_DESC, 0L); + warnPauseThresholdExceeded = + getMetricsRegistry().newCounter(WARN_THRESHOLD_COUNT_KEY, WARN_THRESHOLD_COUNT_DESC, 0L); pausesWithGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITH_GC_KEY); pausesWithoutGc = getMetricsRegistry().newTimeHistogram(PAUSE_TIME_WITHOUT_GC_KEY); } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java index 50ebd46b7166..7b1efb399af1 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/zookeeper/MetricsZooKeeperSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.zookeeper; import org.apache.hadoop.hbase.metrics.BaseSourceImpl; @@ -25,9 +24,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. - * - * Implements BaseSource through BaseSourceImpl, following the pattern. + * Class that transitions metrics from MetricsZooKeeper into the metrics subsystem. Implements + * BaseSource through BaseSourceImpl, following the pattern. 
*/ @InterfaceAudience.Private public class MetricsZooKeeperSourceImpl extends BaseSourceImpl implements MetricsZooKeeperSource { @@ -55,34 +53,34 @@ public MetricsZooKeeperSourceImpl(String metricsName, String metricsDescription, String metricsContext, String metricsJmxContext) { super(metricsName, metricsDescription, metricsContext, metricsJmxContext); - //Create and store the metrics that will be used. - authFailedFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); - connectionLossFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_CONNECTIONLOSS, EXCEPTION_CONNECTIONLOSS_DESC, 0L); - dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_DATAINCONSISTENCY, EXCEPTION_DATAINCONSISTENCY_DESC, 0L); - invalidACLFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); - noAuthFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); - operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_OPERATIONTIMEOUT, EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); - runtimeInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); - sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SESSIONEXPIRED, EXCEPTION_SESSIONEXPIRED_DESC, 0L); - systemErrorFailedOpCount = this.getMetricsRegistry().newGauge( - EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); - totalFailedZKCalls = this.getMetricsRegistry().newGauge( - TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); - - readOpLatency = this.getMetricsRegistry().newHistogram( - READ_OPERATION_LATENCY_NAME, READ_OPERATION_LATENCY_DESC); - writeOpLatency = this.getMetricsRegistry().newHistogram( - WRITE_OPERATION_LATENCY_NAME, WRITE_OPERATION_LATENCY_DESC); - syncOpLatency = this.getMetricsRegistry().newHistogram( - SYNC_OPERATION_LATENCY_NAME, SYNC_OPERATION_LATENCY_DESC); + // Create and store the metrics that will be used. 
+ authFailedFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_AUTHFAILED, EXCEPTION_AUTHFAILED_DESC, 0L); + connectionLossFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_CONNECTIONLOSS, + EXCEPTION_CONNECTIONLOSS_DESC, 0L); + dataInconsistencyFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_DATAINCONSISTENCY, + EXCEPTION_DATAINCONSISTENCY_DESC, 0L); + invalidACLFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_INVALIDACL, EXCEPTION_INVALIDACL_DESC, 0L); + noAuthFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_NOAUTH, EXCEPTION_NOAUTH_DESC, 0L); + operationTimeOutFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_OPERATIONTIMEOUT, + EXCEPTION_OPERATIONTIMEOUT_DESC, 0L); + runtimeInconsistencyFailedOpCount = this.getMetricsRegistry() + .newGauge(EXCEPTION_RUNTIMEINCONSISTENCY, EXCEPTION_RUNTIMEINCONSISTENCY_DESC, 0L); + sessionExpiredFailedOpCount = this.getMetricsRegistry().newGauge(EXCEPTION_SESSIONEXPIRED, + EXCEPTION_SESSIONEXPIRED_DESC, 0L); + systemErrorFailedOpCount = + this.getMetricsRegistry().newGauge(EXCEPTION_SYSTEMERROR, EXCEPTION_SYSTEMERROR_DESC, 0L); + totalFailedZKCalls = + this.getMetricsRegistry().newGauge(TOTAL_FAILED_ZK_CALLS, TOTAL_FAILED_ZK_CALLS_DESC, 0L); + + readOpLatency = this.getMetricsRegistry().newHistogram(READ_OPERATION_LATENCY_NAME, + READ_OPERATION_LATENCY_DESC); + writeOpLatency = this.getMetricsRegistry().newHistogram(WRITE_OPERATION_LATENCY_NAME, + WRITE_OPERATION_LATENCY_DESC); + syncOpLatency = this.getMetricsRegistry().newHistogram(SYNC_OPERATION_LATENCY_NAME, + SYNC_OPERATION_LATENCY_DESC); } public void getMetrics(MetricsCollector metricsCollector, boolean all) { @@ -91,7 +89,7 @@ public void getMetrics(MetricsCollector metricsCollector, boolean all) { } private void clearZKExceptionMetrics() { - //Reset the exception metrics. + // Reset the exception metrics. clearMetricIfNotNull(authFailedFailedOpCount); clearMetricIfNotNull(connectionLossFailedOpCount); clearMetricIfNotNull(dataInconsistencyFailedOpCount); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java index 88b491ba3ea1..9b62cd898f61 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/impl/JmxCacheBuster.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsExecutorImpl; @@ -32,11 +31,9 @@ /** * JMX caches the beans that have been exported; even after the values are removed from hadoop's - * metrics system the keys and old values will still remain. This class stops and restarts the - * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. - * - * This class need to be in the o.a.h.metrics2.impl namespace as many of the variables/calls used - * are package private. + * metrics system the keys and old values will still remain. 
This class stops and restarts the + * Hadoop metrics system, forcing JMX to clear the cache of exported metrics. This class needs to be + * in the o.a.h.metrics2.impl namespace as many of the variables/calls used are package private. */ @InterfaceAudience.Private public final class JmxCacheBuster { @@ -56,7 +53,7 @@ public static void clearJmxCache() { if (LOG.isTraceEnabled()) { LOG.trace("clearing JMX Cache" + StringUtils.stringifyException(new Exception())); } - //If there are more then 100 ms before the executor will run then everything should be merged. + // If there are more than 100 ms before the executor will run then everything should be merged. ScheduledFuture future = fut.get(); if ((future != null && (!future.isDone() && future.getDelay(TimeUnit.MILLISECONDS) > 100))) { // BAIL OUT @@ -104,9 +101,9 @@ public void run() { Thread.sleep(500); DefaultMetricsSystem.instance().start(); } - } catch (Exception exception) { + } catch (Exception exception) { LOG.debug("error clearing the jmx it appears the metrics system hasn't been started", - exception); + exception); } } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java index 723e6d34c1d7..09556707648e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystemHelper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -77,8 +77,8 @@ public boolean removeObjectName(final String name) { * so far as a Source, thus preventing further re-registration of the source with the same name. * In case of dynamic metrics tied to region-lifecycles, this becomes a problem because we would * like to be able to re-register and remove with the same name. Otherwise, it is resource leak. - * This ugly code manually removes the name from the UniqueNames map. - * TODO: May not be needed for Hadoop versions after YARN-5190. + * This ugly code manually removes the name from the UniqueNames map. TODO: May not be needed for + * Hadoop versions after YARN-5190. */ public void removeSourceName(String name) { if (sourceNamesField == null || mapField == null) { @@ -92,8 +92,9 @@ public void removeSourceName(String name) { } } catch (Exception ex) { if (LOG.isTraceEnabled()) { - LOG.trace("Received exception while trying to access Hadoop Metrics classes via " + - "reflection.", ex); + LOG.trace( + "Received exception while trying to access Hadoop Metrics classes via " + "reflection.", + ex); } } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java index 7a791c92bc1e..354e0e25627f 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/DynamicMetricsRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
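The clearJmxCache() hunk above keeps an interesting scheduling idiom: a metrics-system restart is queued on a shared executor, and a new request bails out when an already scheduled run is still more than roughly 100 ms away, so bursts of cache-clear requests collapse into a single restart. A minimal, self-contained sketch of that coalescing pattern (plain java.util.concurrent only; the actual stop/start of the Hadoop metrics system is replaced by a hypothetical placeholder task):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledFuture;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicReference;

    public class CoalescingRestart {
      private static final ScheduledExecutorService EXECUTOR =
        Executors.newSingleThreadScheduledExecutor();
      // Holds the currently scheduled restart, if any.
      private static final AtomicReference<ScheduledFuture<?>> FUTURE = new AtomicReference<>();

      /** Request a restart; requests arriving while one is already pending are merged into it. */
      public static void requestRestart() {
        ScheduledFuture<?> pending = FUTURE.get();
        if (pending != null && !pending.isDone() && pending.getDelay(TimeUnit.MILLISECONDS) > 100) {
          return; // BAIL OUT: the upcoming run will cover this request too.
        }
        // Schedule far enough in the future that follow-up requests can piggyback on it.
        ScheduledFuture<?> next = EXECUTOR.schedule(
          () -> System.out.println("restarting metrics system (placeholder)"), 5, TimeUnit.SECONDS);
        FUTURE.set(next);
      }
    }

The 100 ms threshold and 5 second delay are illustrative; the point is that callers racing with a pending run simply piggyback on it instead of forcing extra restarts.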
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.Collection; @@ -29,51 +28,41 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.MoreObjects; import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * An optional metrics registry class for creating and maintaining a - * collection of MetricsMutables, making writing metrics source easier. - * NOTE: this is a copy of org.apache.hadoop.metrics2.lib.MetricsRegistry with added one - * feature: metrics can be removed. When HADOOP-8313 is fixed, usages of this class - * should be substituted with org.apache.hadoop.metrics2.lib.MetricsRegistry. - * This implementation also provides handy methods for creating metrics - * dynamically. - * Another difference is that metricsMap implementation is substituted with - * thread-safe map, as we allow dynamic metrics additions/removals. + * An optional metrics registry class for creating and maintaining a collection of MetricsMutables, + * making writing metrics source easier. NOTE: this is a copy of + * org.apache.hadoop.metrics2.lib.MetricsRegistry with added one feature: metrics can be removed. + * When HADOOP-8313 is fixed, usages of this class should be substituted with + * org.apache.hadoop.metrics2.lib.MetricsRegistry. This implementation also provides handy methods + * for creating metrics dynamically. Another difference is that metricsMap implementation is + * substituted with thread-safe map, as we allow dynamic metrics additions/removals. 
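The rewrapped class comment above spells out why DynamicMetricsRegistry exists at all: unlike Hadoop's own MetricsRegistry it lets metrics be removed again, so it keeps them in a thread-safe map that tolerates concurrent additions and removals. A toy illustration of that design choice, using a plain ConcurrentHashMap rather than the real MutableMetric hierarchy:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Toy registry: named entries can be added and removed concurrently, which is the
    // capability the HBase copy adds on top of Hadoop's MetricsRegistry.
    class ToyDynamicRegistry<M> {
      private final ConcurrentMap<String, M> metrics = new ConcurrentHashMap<>();

      M register(String name, M metric) {
        // putIfAbsent keeps the first registration when two threads race on the same name.
        M existing = metrics.putIfAbsent(name, metric);
        return existing != null ? existing : metric;
      }

      void remove(String name) {
        metrics.remove(name); // e.g. when a region closes and its metrics should go away
      }

      M get(String name) {
        return metrics.get(name);
      }
    }

Removal matters for metrics tied to short-lived objects such as regions; without it the map, and the names registered with the metrics system, would only ever grow.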
*/ @InterfaceAudience.Private public class DynamicMetricsRegistry { private static final Logger LOG = LoggerFactory.getLogger(DynamicMetricsRegistry.class); - private final ConcurrentMap metricsMap = - Maps.newConcurrentMap(); - private final ConcurrentMap tagsMap = - Maps.newConcurrentMap(); + private final ConcurrentMap metricsMap = Maps.newConcurrentMap(); + private final ConcurrentMap tagsMap = Maps.newConcurrentMap(); private final MetricsInfo metricsInfo; private final DefaultMetricsSystemHelper helper = new DefaultMetricsSystemHelper(); - private final static String[] histogramSuffixes = new String[]{ - "_num_ops", - "_min", - "_max", - "_median", - "_75th_percentile", - "_90th_percentile", - "_95th_percentile", - "_99th_percentile"}; + private final static String[] histogramSuffixes = new String[] { "_num_ops", "_min", "_max", + "_median", "_75th_percentile", "_90th_percentile", "_95th_percentile", "_99th_percentile" }; /** * Construct the registry with a record name - * @param name of the record of the metrics + * @param name of the record of the metrics */ public DynamicMetricsRegistry(String name) { - this(Interns.info(name,name)); + this(Interns.info(name, name)); } /** * Construct the registry with a metadata object - * @param info the info object for the metrics record/group + * @param info the info object for the metrics record/group */ public DynamicMetricsRegistry(MetricsInfo info) { metricsInfo = info; @@ -88,7 +77,7 @@ public MetricsInfo info() { /** * Get a metric by name - * @param name of the metric + * @param name of the metric * @return the metric object */ public MutableMetric get(String name) { @@ -97,7 +86,7 @@ public MutableMetric get(String name) { /** * Get a tag by name - * @param name of the tag + * @param name of the tag * @return the tag object */ public MetricsTag getTag(String name) { @@ -106,9 +95,9 @@ public MetricsTag getTag(String name) { /** * Create a mutable long integer counter - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(String name, String desc, long iVal) { @@ -117,8 +106,8 @@ public MutableFastCounter newCounter(String name, String desc, long iVal) { /** * Create a mutable long integer counter - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new counter object */ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { @@ -128,9 +117,9 @@ public MutableFastCounter newCounter(MetricsInfo info, long iVal) { /** * Create a mutable long integer gauge - * @param name of the metric - * @param desc metric description - * @param iVal initial value + * @param name of the metric + * @param desc metric description + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(String name, String desc, long iVal) { @@ -139,8 +128,8 @@ public MutableGaugeLong newGauge(String name, String desc, long iVal) { /** * Create a mutable long integer gauge - * @param info metadata of the metric - * @param iVal initial value + * @param info metadata of the metric + * @param iVal initial value * @return a new gauge object */ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { @@ -150,36 +139,34 @@ public MutableGaugeLong newGauge(MetricsInfo info, long iVal) { /** * Create a mutable metric with stats 
- * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") - * @param extended produce extended stat (stdev, min/max etc.) if true. + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") + * @param extended produce extended stat (stdev, min/max etc.) if true. * @return a new mutable stat metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName, boolean extended) { - MutableStat ret = - new MutableStat(name, desc, sampleName, valueName, extended); + public MutableStat newStat(String name, String desc, String sampleName, String valueName, + boolean extended) { + MutableStat ret = new MutableStat(name, desc, sampleName, valueName, extended); return addNewMetricIfAbsent(name, ret, MutableStat.class); } /** * Create a mutable metric with stats - * @param name of the metric - * @param desc metric description - * @param sampleName of the metric (e.g., "Ops") - * @param valueName of the metric (e.g., "Time" or "Latency") + * @param name of the metric + * @param desc metric description + * @param sampleName of the metric (e.g., "Ops") + * @param valueName of the metric (e.g., "Time" or "Latency") * @return a new mutable metric object */ - public MutableStat newStat(String name, String desc, - String sampleName, String valueName) { + public MutableStat newStat(String name, String desc, String sampleName, String valueName) { return newStat(name, desc, sampleName, valueName, false); } /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @return a new mutable metric object */ public MutableRate newRate(String name) { @@ -188,7 +175,7 @@ public MutableRate newRate(String name) { /** * Create a mutable rate metric - * @param name of the metric + * @param name of the metric * @param description of the metric * @return a new mutable rate metric object */ @@ -198,9 +185,9 @@ public MutableRate newRate(String name, String description) { /** * Create a mutable rate metric (for throughput measurement) - * @param name of the metric - * @param desc description - * @param extended produce extended stat (stdev/min/max etc.) if true + * @param name of the metric + * @param desc description + * @param extended produce extended stat (stdev/min/max etc.) 
if true * @return a new mutable rate metric object */ public MutableRate newRate(String name, String desc, boolean extended) { @@ -208,8 +195,7 @@ public MutableRate newRate(String name, String desc, boolean extended) { } @InterfaceAudience.Private - public MutableRate newRate(String name, String desc, - boolean extended, boolean returnExisting) { + public MutableRate newRate(String name, String desc, boolean extended, boolean returnExisting) { if (returnExisting) { MutableMetric rate = metricsMap.get(name); if (rate != null) { @@ -217,8 +203,7 @@ public MutableRate newRate(String name, String desc, return (MutableRate) rate; } - throw new MetricsException("Unexpected metrics type "+ rate.getClass() - +" for "+ name); + throw new MetricsException("Unexpected metrics type " + rate.getClass() + " for " + name); } } MutableRate ret = new MutableRate(name, desc, extended); @@ -244,7 +229,7 @@ public MutableHistogram newHistogram(String name, String desc) { MutableHistogram histo = new MutableHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableHistogram.class); } - + /** * Create a new histogram with time range counts. * @param name Name of the histogram. @@ -264,7 +249,7 @@ public MutableTimeHistogram newTimeHistogram(String name, String desc) { MutableTimeHistogram histo = new MutableTimeHistogram(name, desc); return addNewMetricIfAbsent(name, histo, MutableTimeHistogram.class); } - + /** * Create a new histogram with size range counts. * @param name Name of the histogram. @@ -285,14 +270,13 @@ public MutableSizeHistogram newSizeHistogram(String name, String desc) { return addNewMetricIfAbsent(name, histo, MutableSizeHistogram.class); } - synchronized void add(String name, MutableMetric metric) { addNewMetricIfAbsent(name, metric, MutableMetric.class); } /** * Add sample to a stat metric by name. - * @param name of the metric + * @param name of the metric * @param value of the snapshot to add */ public void add(String name, long value) { @@ -301,12 +285,10 @@ public void add(String name, long value) { if (m != null) { if (m instanceof MutableStat) { ((MutableStat) m).add(value); + } else { + throw new MetricsException("Unsupported add(value) for metric " + name); } - else { - throw new MetricsException("Unsupported add(value) for metric "+ name); - } - } - else { + } else { metricsMap.put(name, newRate(name)); // default is a rate metric add(name, value); } @@ -323,7 +305,7 @@ public DynamicMetricsRegistry setContext(String name) { /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag * @param value of the tag * @return the registry (for keep adding tags) @@ -334,20 +316,20 @@ public DynamicMetricsRegistry tag(String name, String description, String value) /** * Add a tag to the metrics - * @param name of the tag + * @param name of the tag * @param description of the tag * @param value of the tag - * @param override existing tag if true + * @param override existing tag if true * @return the registry (for keep adding tags) */ public DynamicMetricsRegistry tag(String name, String description, String value, - boolean override) { + boolean override) { return tag(new MetricsInfoImpl(name, description), value, override); } /** * Add a tag to the metrics - * @param info metadata of the tag + * @param info metadata of the tag * @param value of the tag * @param override existing tag if true * @return the registry (for keep adding tags etc.) 
@@ -358,7 +340,7 @@ public DynamicMetricsRegistry tag(MetricsInfo info, String value, boolean overri if (!override) { MetricsTag existing = tagsMap.putIfAbsent(info.name(), tag); if (existing != null) { - throw new MetricsException("Tag "+ info.name() +" already exists!"); + throw new MetricsException("Tag " + info.name() + " already exists!"); } return this; } @@ -394,10 +376,10 @@ public void snapshot(MetricsRecordBuilder builder, boolean all) { } } - @Override public String toString() { - return MoreObjects.toStringHelper(this) - .add("info", metricsInfo).add("tags", tags()).add("metrics", metrics()) - .toString(); + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("info", metricsInfo).add("tags", tags()) + .add("metrics", metrics()).toString(); } /** @@ -410,131 +392,125 @@ public void removeMetric(String name) { } public void removeHistogramMetrics(String baseName) { - for (String suffix:histogramSuffixes) { - removeMetric(baseName+suffix); + for (String suffix : histogramSuffixes) { + removeMetric(baseName + suffix); } } /** - * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. - * - * @param gaugeName name of the gauge to create or get. + * Get a MetricMutableGaugeLong from the storage. If it is not there atomically put it. + * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeLong getGauge(String gaugeName, long potentialStartingValue) { - //Try and get the guage. + // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeLong newGauge = new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeLong newGauge = + new MutableGaugeLong(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeLong should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeLong should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeLong)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeLong"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeLong"); } return (MutableGaugeLong) metric; } /** - * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. - * - * @param gaugeName name of the gauge to create or get. + * Get a MetricMutableGaugeInt from the storage. If it is not there atomically put it. + * @param gaugeName name of the gauge to create or get. * @param potentialStartingValue value of the new gauge if we have to create it. */ public MutableGaugeInt getGaugeInt(String gaugeName, int potentialStartingValue) { - //Try and get the guage. 
+ // Try and get the guage. MutableMetric metric = metricsMap.get(gaugeName); - //If it's not there then try and put a new one in the storage. + // If it's not there then try and put a new one in the storage. if (metric == null) { - //Create the potential new gauge. - MutableGaugeInt newGauge = new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), - potentialStartingValue); + // Create the potential new gauge. + MutableGaugeInt newGauge = + new MutableGaugeInt(new MetricsInfoImpl(gaugeName, ""), potentialStartingValue); - // Try and put the gauge in. This is atomic. + // Try and put the gauge in. This is atomic. metric = metricsMap.putIfAbsent(gaugeName, newGauge); - //If the value we get back is null then the put was successful and we will return that. - //otherwise gaugeInt should contain the thing that was in before the put could be completed. + // If the value we get back is null then the put was successful and we will return that. + // otherwise gaugeInt should contain the thing that was in before the put could be completed. if (metric == null) { return newGauge; } } if (!(metric instanceof MutableGaugeInt)) { - throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + - " and not of type MetricMutableGaugeInr"); + throw new MetricsException("Metric already exists in registry for metric name: " + gaugeName + + " and not of type MetricMutableGaugeInr"); } return (MutableGaugeInt) metric; } /** - * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. - * - * @param counterName Name of the counter to get + * Get a MetricMutableCounterLong from the storage. If it is not there atomically put it. + * @param counterName Name of the counter to get * @param potentialStartingValue starting value if we have to create a new counter */ public MutableFastCounter getCounter(String counterName, long potentialStartingValue) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. MutableMetric counter = metricsMap.get(counterName); if (counter == null) { MutableFastCounter newCounter = - new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); + new MutableFastCounter(new MetricsInfoImpl(counterName, ""), potentialStartingValue); counter = metricsMap.putIfAbsent(counterName, newCounter); if (counter == null) { return newCounter; } } - if (!(counter instanceof MutableCounter)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - counterName + " and not of type MutableCounter"); + throw new MetricsException("Metric already exists in registry for metric name: " + counterName + + " and not of type MutableCounter"); } return (MutableFastCounter) counter; } public MutableHistogram getHistogram(String histoName) { - //See getGauge for description on how this works. + // See getGauge for description on how this works. 
MutableMetric histo = metricsMap.get(histoName); if (histo == null) { - MutableHistogram newCounter = - new MutableHistogram(new MetricsInfoImpl(histoName, "")); + MutableHistogram newCounter = new MutableHistogram(new MetricsInfoImpl(histoName, "")); histo = metricsMap.putIfAbsent(histoName, newCounter); if (histo == null) { return newCounter; } } - if (!(histo instanceof MutableHistogram)) { - throw new MetricsException("Metric already exists in registry for metric name: " + - histoName + " and not of type MutableHistogram"); + throw new MetricsException("Metric already exists in registry for metric name: " + histoName + + " and not of type MutableHistogram"); } return (MutableHistogram) histo; } - private T addNewMetricIfAbsent(String name, T ret, + private T addNewMetricIfAbsent(String name, T ret, Class metricClass) { - //If the value we get back is null then the put was successful and we will + // If the value we get back is null then the put was successful and we will // return that. Otherwise metric should contain the thing that was in // before the put could be completed. MutableMetric metric = metricsMap.putIfAbsent(name, ret); @@ -546,19 +522,17 @@ private T addNewMetricIfAbsent(String name, T ret, } @SuppressWarnings("unchecked") - private T returnExistingWithCast(MutableMetric metric, - Class metricClass, String name) { + private T returnExistingWithCast(MutableMetric metric, Class metricClass, String name) { if (!metricClass.isAssignableFrom(metric.getClass())) { - throw new MetricsException("Metric already exists in registry for metric name: " + - name + " and not of type " + metricClass + - " but instead of type " + metric.getClass()); + throw new MetricsException("Metric already exists in registry for metric name: " + name + + " and not of type " + metricClass + " but instead of type " + metric.getClass()); } return (T) metric; } public void clearMetrics() { - for (String name:metricsMap.keySet()) { + for (String name : metricsMap.keySet()) { helper.removeObjectName(name); } metricsMap.clear(); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java index d24f23f7f359..68b871ff4a4e 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MetricsExecutorImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,21 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. 
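getGauge, getGaugeInt, getCounter and getHistogram in the hunks above all follow the same race-safe recipe: look the metric up, optimistically build a fresh one, publish it with putIfAbsent, and if another thread won the race, check that the stored object has the expected type before handing it back. A condensed sketch of that recipe, with a made-up Metric marker interface standing in for MutableMetric:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    class GetOrCreateExample {
      interface Metric { }                                      // stand-in for MutableMetric
      static final class LongGauge implements Metric { volatile long value; }

      private final ConcurrentMap<String, Metric> metrics = new ConcurrentHashMap<>();

      LongGauge getGauge(String name, long initialValue) {
        Metric metric = metrics.get(name);
        if (metric == null) {
          LongGauge fresh = new LongGauge();
          fresh.value = initialValue;
          // Atomic publish: returns null if we won, or the previously stored metric if we lost.
          metric = metrics.putIfAbsent(name, fresh);
          if (metric == null) {
            return fresh;
          }
        }
        if (!(metric instanceof LongGauge)) {
          throw new IllegalStateException(
            "Metric already exists for " + name + " and is not a LongGauge");
        }
        return (LongGauge) metric;
      }
    }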
*/ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.ThreadFactory; import java.util.concurrent.atomic.AtomicInteger; - import org.apache.hadoop.metrics2.MetricsExecutor; import org.apache.yetus.audience.InterfaceAudience; /** - * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by - * MetricsRegionAggregateSourceImpl, and - * JmxCacheBuster + * Class to handle the ScheduledExecutorService{@link ScheduledExecutorService} used by + * MetricsRegionAggregateSourceImpl, and JmxCacheBuster */ @InterfaceAudience.Private public class MetricsExecutorImpl implements MetricsExecutor { @@ -48,8 +45,9 @@ public void stop() { private enum ExecutorSingleton { INSTANCE; - private final transient ScheduledExecutorService scheduler = new ScheduledThreadPoolExecutor(1, - new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); + + private final transient ScheduledExecutorService scheduler = + new ScheduledThreadPoolExecutor(1, new ThreadPoolExecutorThreadFactory("HBase-Metrics2-")); } private final static class ThreadPoolExecutorThreadFactory implements ThreadFactory { diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java index 7b5ec024a508..f8f8aee35501 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableFastCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,9 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java index dc86ebe8bf76..fc7ab8cd4c85 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
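MetricsExecutorImpl above relies on the enum-singleton idiom: class loading creates exactly one ExecutorSingleton.INSTANCE, which owns a single-threaded scheduler built with a thread factory that names its worker. A self-contained sketch of the same idiom (class and thread names here are illustrative, not the HBase ones):

    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.atomic.AtomicInteger;

    final class SharedScheduler {
      private enum Holder {
        INSTANCE;

        final ScheduledExecutorService scheduler =
          new ScheduledThreadPoolExecutor(1, new NamedDaemonFactory("metrics-scheduler-"));
      }

      private static final class NamedDaemonFactory implements ThreadFactory {
        private final AtomicInteger count = new AtomicInteger();
        private final String prefix;

        NamedDaemonFactory(String prefix) { this.prefix = prefix; }

        @Override
        public Thread newThread(Runnable r) {
          Thread t = new Thread(r, prefix + count.incrementAndGet());
          t.setDaemon(true); // background metrics work should not block JVM shutdown
          return t;
        }
      }

      static ScheduledExecutorService get() {
        return Holder.INSTANCE.scheduler;
      }

      private SharedScheduler() { }
    }

Marking the worker as a daemon is an assumption made for this sketch, not something visible in the diff; the enum guarantees a single, lazily and safely initialised scheduler without explicit locking.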
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.commons.lang3.StringUtils; @@ -51,7 +50,8 @@ public void add(final long val) { histogram.update(val); } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } @@ -65,7 +65,7 @@ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boo } public static void snapshot(String name, String desc, Histogram histogram, - MetricsRecordBuilder metricsRecordBuilder, boolean all) { + MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. Snapshot snapshot = histogram.snapshot(); if (snapshot != null) { @@ -76,27 +76,27 @@ public static void snapshot(String name, String desc, Histogram histogram, protected static void updateSnapshotMetrics(String name, String desc, Histogram histogram, Snapshot snapshot, MetricsRecordBuilder metricsRecordBuilder) { metricsRecordBuilder.addCounter(Interns.info(name + NUM_OPS_METRIC_NAME, desc), - histogram.getCount()); + histogram.getCount()); metricsRecordBuilder.addGauge(Interns.info(name + MIN_METRIC_NAME, desc), snapshot.getMin()); metricsRecordBuilder.addGauge(Interns.info(name + MAX_METRIC_NAME, desc), snapshot.getMax()); metricsRecordBuilder.addGauge(Interns.info(name + MEAN_METRIC_NAME, desc), snapshot.getMean()); metricsRecordBuilder.addGauge(Interns.info(name + TWENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get25thPercentile()); + snapshot.get25thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + MEDIAN_METRIC_NAME, desc), - snapshot.getMedian()); + snapshot.getMedian()); metricsRecordBuilder.addGauge(Interns.info(name + SEVENTY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get75thPercentile()); + snapshot.get75thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETIETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get90thPercentile()); + snapshot.get90thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_FIFTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get95thPercentile()); + snapshot.get95thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_EIGHTH_PERCENTILE_METRIC_NAME, desc), - snapshot.get98thPercentile()); + snapshot.get98thPercentile()); metricsRecordBuilder.addGauge(Interns.info(name + NINETY_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get99thPercentile()); + snapshot.get99thPercentile()); metricsRecordBuilder.addGauge( - Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), - snapshot.get999thPercentile()); + Interns.info(name + NINETY_NINE_POINT_NINETH_PERCENTILE_METRIC_NAME, desc), + snapshot.get999thPercentile()); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java index 507e95400264..6146c53e1404 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
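updateSnapshotMetrics above fans one histogram snapshot out into a family of suffixed metrics: an operation counter plus gauges for min, max, mean, median and a ladder of percentiles. Stripped of the MetricsRecordBuilder plumbing, the naming scheme amounts to the following (suffix spellings borrowed from the histogramSuffixes array earlier in this diff; the percentile selection is abbreviated):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class SnapshotToMetrics {
      /** Flatten one histogram snapshot into suffixed metric names, as the source does with gauges. */
      static Map<String, Number> flatten(String name, long count, long min, long max,
          double mean, double median, double p75, double p95, double p99) {
        Map<String, Number> out = new LinkedHashMap<>();
        out.put(name + "_num_ops", count);   // emitted as a counter in the real code
        out.put(name + "_min", min);
        out.put(name + "_max", max);
        out.put(name + "_mean", mean);
        out.put(name + "_median", median);
        out.put(name + "_75th_percentile", p75);
        out.put(name + "_95th_percentile", p95);
        out.put(name + "_99th_percentile", p99);
        return out;
      }
    }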
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.hbase.metrics.Interns; @@ -40,16 +39,15 @@ public MutableRangeHistogram(String name, String description) { } /** - * Returns the type of range histogram size or time + * Returns the type of range histogram size or time */ public abstract String getRangeType(); - + /** - * Returns the ranges to be counted + * Returns the ranges to be counted */ public abstract long[] getRanges(); - @Override public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boolean all) { // Get a reference to the old histogram. @@ -61,7 +59,7 @@ public synchronized void snapshot(MetricsRecordBuilder metricsRecordBuilder, boo } public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder, - Snapshot snapshot) { + Snapshot snapshot) { long priorRange = 0; long cumNum = 0; @@ -71,8 +69,8 @@ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder long val = snapshot.getCountAtOrBelow(ranges[i]); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], desc), + val - cumNum); } priorRange = ranges[i]; cumNum = val; @@ -80,12 +78,12 @@ public void updateSnapshotRangeMetrics(MetricsRecordBuilder metricsRecordBuilder long val = snapshot.getCount(); if (val - cumNum > 0) { metricsRecordBuilder.addCounter( - Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), - val - cumNum); + Interns.info(name + "_" + rangeType + "_" + priorRange + "-inf", desc), val - cumNum); } } - @Override public long getCount() { + @Override + public long getCount() { return histogram.getCount(); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java index b02efb76f9d8..b682042ac1cc 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableSizeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
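The loop in updateSnapshotRangeMetrics converts cumulative counts at each range boundary into per-bucket counters named like name_TimeRangeCount_10-30, emits a bucket only when it is non-empty, and finishes with a priorRange-inf bucket for the overflow. The arithmetic, separated from the histogram and metrics types (cumulative counts are passed in as a plain array standing in for snapshot.getCountAtOrBelow):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class RangeBuckets {
      /**
       * @param cumulativeAtBoundary count of samples at or below ranges[i], one entry per boundary
       * @param totalCount           total number of samples in the snapshot
       */
      static Map<String, Long> toBuckets(String name, String rangeType, long[] ranges,
          long[] cumulativeAtBoundary, long totalCount) {
        Map<String, Long> buckets = new LinkedHashMap<>();
        long priorRange = 0;
        long cumNum = 0;
        for (int i = 0; i < ranges.length; i++) {
          long val = cumulativeAtBoundary[i];          // snapshot.getCountAtOrBelow(ranges[i])
          if (val - cumNum > 0) {                      // only emit non-empty buckets
            buckets.put(name + "_" + rangeType + "_" + priorRange + "-" + ranges[i], val - cumNum);
          }
          priorRange = ranges[i];
          cumNum = val;
        }
        if (totalCount - cumNum > 0) {                 // overflow bucket: priorRange-inf
          buckets.put(name + "_" + rangeType + "_" + priorRange + "-inf", totalCount - cumNum);
        }
        return buckets;
      }
    }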
*/ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; @@ -28,7 +27,8 @@ public class MutableSizeHistogram extends MutableRangeHistogram { private final static String RANGE_TYPE = "SizeRangeCount"; - private final static long[] RANGES = {10,100,1000,10000,100000,1000000,10000000,100000000}; + private final static long[] RANGES = + { 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000 }; public MutableSizeHistogram(MetricsInfo info) { this(info.name(), info.description()); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java index 7c6dfbbd5776..03cf8bd291ba 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/lib/MutableTimeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.lib; import org.apache.hadoop.metrics2.MetricsInfo; @@ -28,7 +27,7 @@ public class MutableTimeHistogram extends MutableRangeHistogram { private final static String RANGE_TYPE = "TimeRangeCount"; private final static long[] RANGES = - { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; + { 1, 3, 10, 30, 100, 300, 1000, 3000, 10000, 30000, 60000, 120000, 300000, 600000 }; public MutableTimeHistogram(MetricsInfo info) { this(info.name(), info.description()); diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java index 84a76edf72e9..237454e4d7f8 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricQuantile.java @@ -20,8 +20,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Specifies a quantile (with error bounds) to be watched by a - * {@link MetricSampleQuantiles} object. + * Specifies a quantile (with error bounds) to be watched by a {@link MetricSampleQuantiles} object. */ @InterfaceAudience.Private public class MetricQuantile { @@ -59,7 +58,6 @@ public boolean equals(Object aThat) { @Override public int hashCode() { - return (int) (Double.doubleToLongBits(quantile) ^ Double - .doubleToLongBits(error)); + return (int) (Double.doubleToLongBits(quantile) ^ Double.doubleToLongBits(error)); } } diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java index c1880f8203ba..e023a34d10b0 100644 --- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java +++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/metrics2/util/MetricSampleQuantiles.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
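The reformatted MetricQuantile.hashCode() above hashes its two double fields by XOR-ing their raw bit patterns; Double.doubleToLongBits gives a stable long per value (all NaNs collapse to a single pattern), which keeps equals and hashCode consistent for floating-point fields. A tiny stand-alone illustration of the same pattern, with field names assumed from the surrounding diff:

    class QuantileKey {
      private final double quantile; // e.g. 0.99
      private final double error;    // allowed error, e.g. 0.001

      QuantileKey(double quantile, double error) {
        this.quantile = quantile;
        this.error = error;
      }

      @Override
      public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof QuantileKey)) return false;
        QuantileKey that = (QuantileKey) o;
        // Compare via the same bit patterns used by hashCode so the two stay consistent.
        return Double.doubleToLongBits(quantile) == Double.doubleToLongBits(that.quantile)
            && Double.doubleToLongBits(error) == Double.doubleToLongBits(that.error);
      }

      @Override
      public int hashCode() {
        return (int) (Double.doubleToLongBits(quantile) ^ Double.doubleToLongBits(error));
      }
    }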
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.metrics2.util; import java.io.IOException; @@ -24,24 +23,16 @@ import java.util.LinkedList; import java.util.ListIterator; import java.util.Map; - import org.apache.yetus.audience.InterfaceAudience; /** - * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm - * for streaming calculation of targeted high-percentile epsilon-approximate - * quantiles. - * - * This is a generalization of the earlier work by Greenwald and Khanna (GK), - * which essentially allows different error bounds on the targeted quantiles, - * which allows for far more efficient calculation of high-percentiles. - * - * See: Cormode, Korn, Muthukrishnan, and Srivastava - * "Effective Computation of Biased Quantiles over Data Streams" in ICDE 2005 - * - * Greenwald and Khanna, - * "Space-efficient online computation of quantile summaries" in SIGMOD 2001 - * + * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm for streaming + * calculation of targeted high-percentile epsilon-approximate quantiles. This is a generalization + * of the earlier work by Greenwald and Khanna (GK), which essentially allows different error bounds + * on the targeted quantiles, which allows for far more efficient calculation of high-percentiles. + * See: Cormode, Korn, Muthukrishnan, and Srivastava "Effective Computation of Biased Quantiles over + * Data Streams" in ICDE 2005 Greenwald and Khanna, "Space-efficient online computation of quantile + * summaries" in SIGMOD 2001 */ @InterfaceAudience.Private public class MetricSampleQuantiles { @@ -57,9 +48,8 @@ public class MetricSampleQuantiles { private LinkedList samples; /** - * Buffers incoming items to be inserted in batch. Items are inserted into - * the buffer linearly. When the buffer fills, it is flushed into the samples - * array in its entirety. + * Buffers incoming items to be inserted in batch. Items are inserted into the buffer linearly. + * When the buffer fills, it is flushed into the samples array in its entirety. */ private long[] buffer = new long[500]; private int bufferCount = 0; @@ -75,14 +65,10 @@ public MetricSampleQuantiles(MetricQuantile[] quantiles) { } /** - * Specifies the allowable error for this rank, depending on which quantiles - * are being targeted. - * - * This is the f(r_i, n) function from the CKMS paper. It's basically how wide - * the range of this rank can be. - * - * @param rank - * the index in the list of samples + * Specifies the allowable error for this rank, depending on which quantiles are being targeted. + * This is the f(r_i, n) function from the CKMS paper. It's basically how wide the range of this + * rank can be. + * @param rank the index in the list of samples */ private double allowableError(int rank) { int size = samples.size(); @@ -104,7 +90,6 @@ private double allowableError(int rank) { /** * Add a new value from the stream. - * * @param v the value to insert */ synchronized public void insert(long v) { @@ -120,8 +105,8 @@ synchronized public void insert(long v) { } /** - * Merges items from buffer into the samples array in one pass. - * This is more efficient than doing an insert on every item. + * Merges items from buffer into the samples array in one pass. This is more efficient than doing + * an insert on every item. 
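The javadoc above identifies allowableError as the f(r_i, n) width function from the CKMS targeted-quantiles paper: near a targeted quantile phi with error epsilon the summary must stay tight (about 2*epsilon*n in rank), the tolerance loosens linearly away from the target, and with several targets the tightest bound wins. A sketch following the paper's formulation (the method body itself is not part of this diff, so this is an assumption about the formula, not a copy of the HBase code):

    class TargetedQuantileError {
      static final class Target {
        final double quantile; // phi, e.g. 0.99
        final double error;    // epsilon, e.g. 0.001
        Target(double quantile, double error) { this.quantile = quantile; this.error = error; }
      }

      /** Allowed rank slack for the sample currently holding rank {@code rank} out of {@code n}. */
      static double allowableError(int rank, int n, Target[] targets) {
        double min = n + 1;
        for (Target t : targets) {
          double e;
          if (rank <= t.quantile * n) {
            // rank below the target phi*n: allowance tightens toward 2*epsilon*n at the target
            e = 2.0 * t.error * (n - rank) / (1.0 - t.quantile);
          } else {
            // rank above the target: allowance grows again as the rank moves past it
            e = 2.0 * t.error * rank / t.quantile;
          }
          if (e < min) {
            min = e;
          }
        }
        return min;
      }
    }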
*/ private void insertBatch() { if (bufferCount == 0) { @@ -166,9 +151,8 @@ private void insertBatch() { } /** - * Try to remove extraneous items from the set of sampled items. This checks - * if an item is unnecessary based on the desired error bounds, and merges it - * with the adjacent item if it is. + * Try to remove extraneous items from the set of sampled items. This checks if an item is + * unnecessary based on the desired error bounds, and merges it with the adjacent item if it is. */ private void compress() { if (samples.size() < 2) { @@ -196,7 +180,6 @@ private void compress() { /** * Get the estimated value at the specified quantile. - * * @param quantile Queried quantile, e.g. 0.50 or 0.99. * @return Estimated value at that quantile. */ @@ -225,10 +208,8 @@ private long query(double quantile) throws IOException { /** * Get a snapshot of the current values of all the tracked quantiles. - * * @return snapshot of the tracked quantiles - * @throws IOException - * if no items have been added to the estimator + * @throws IOException if no items have been added to the estimator */ synchronized public Map snapshot() throws IOException { // flush the buffer first for best results @@ -243,7 +224,6 @@ synchronized public Map snapshot() throws IOException { /** * Returns the number of items that the estimator has processed - * * @return count total number of items processed */ synchronized public long getCount() { @@ -252,7 +232,6 @@ synchronized public long getCount() { /** * Returns the number of samples kept by the estimator - * * @return count current number of samples */ synchronized public int getSampleCount() { @@ -269,27 +248,24 @@ synchronized public void clear() { } /** - * Describes a measured value passed to the estimator, tracking additional - * metadata required by the CKMS algorithm. + * Describes a measured value passed to the estimator, tracking additional metadata required by + * the CKMS algorithm. */ private static class SampleItem { - + /** * Value of the sampled item (e.g. a measured latency value) */ private final long value; - + /** - * Difference between the lowest possible rank of the previous item, and - * the lowest possible rank of this item. - * - * The sum of the g of all previous items yields this item's lower bound. + * Difference between the lowest possible rank of the previous item, and the lowest possible + * rank of this item. The sum of the g of all previous items yields this item's lower bound. */ private int g; - + /** - * Difference between the item's greatest possible rank and lowest possible - * rank. + * Difference between the item's greatest possible rank and lowest possible rank. */ private final int delta; diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java index a022ef3e0183..ee9206af566f 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/HadoopShimsImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
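The SampleItem fields documented above are what make querying possible: summing g over the preceding items gives an item's lowest possible rank, and delta bounds how far above that its true rank may sit. A simplified sketch of a rank-bound query built on those two fields (the allowable error is passed in as a constant here, whereas the real algorithm recomputes it per rank, so treat this as an illustration of the bookkeeping only; the sample list must be non-empty and sorted by value):

    import java.util.List;

    class RankBounds {
      static final class Item {
        final long value; // sampled value, e.g. a measured latency
        final int g;      // gap in minimum rank relative to the previous item
        final int delta;  // greatest possible rank minus lowest possible rank
        Item(long value, int g, int delta) { this.value = value; this.g = g; this.delta = delta; }
      }

      /** Return a value whose rank range covers the desired rank for the given quantile. */
      static long query(List<Item> samples, long count, double quantile, double allowableError) {
        long desired = (long) (quantile * count);
        long rankMin = 0;
        for (int i = 1; i < samples.size(); i++) {
          Item prev = samples.get(i - 1);
          Item cur = samples.get(i);
          rankMin += prev.g; // lowest possible rank of prev = sum of g so far
          // If cur's greatest possible rank already overshoots the tolerated window,
          // prev is an acceptable answer for this quantile.
          if (rankMin + cur.g + cur.delta > desired + allowableError / 2) {
            return prev.value;
          }
        }
        return samples.get(samples.size() - 1).value; // edge case: asking for the maximum
      }
    }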
*/ - package org.apache.hadoop.hbase; import org.apache.hadoop.mapreduce.Job; @@ -30,13 +29,13 @@ public class HadoopShimsImpl implements HadoopShims { * Returns a TaskAttemptContext instance created from the given parameters. * @param job an instance of o.a.h.mapreduce.Job * @param taskId an identifier for the task attempt id. Should be parsable by - * {@link TaskAttemptID#forName(String)} + * {@link TaskAttemptID#forName(String)} * @return a concrete TaskAttemptContext instance of o.a.h.mapreduce.TaskAttemptContext */ @Override @SuppressWarnings("unchecked") public T createTestTaskAttemptContext(J job, String taskId) { - Job j = (Job)job; - return (T)new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); + Job j = (Job) job; + return (T) new TaskAttemptContextImpl(j.getConfiguration(), TaskAttemptID.forName(taskId)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java index d95c282ecf99..ee1092d6fafe 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterProcSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,9 +29,9 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterProcSourceImpl + * Test for MetricsMasterProcSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterProcSourceImpl { @ClassRule @@ -40,12 +40,12 @@ public class TestMetricsMasterProcSourceImpl { @Test public void testGetInstance() throws Exception { - MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterProcSourceFactory.class); + MetricsMasterProcSourceFactory metricsMasterProcSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class); MetricsMasterProcSource masterProcSource = metricsMasterProcSourceFactory.create(null); assertTrue(masterProcSource instanceof MetricsMasterProcSourceImpl); assertSame(metricsMasterProcSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsMasterProcSourceFactory.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java index 70ec90ab39a2..cbb031a7e6b2 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/master/TestMetricsMasterSourceImpl.java @@ -29,9 +29,9 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsMasterSourceImpl + * Test for MetricsMasterSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsMasterSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -39,11 +39,11 @@ public class TestMetricsMasterSourceImpl { @Test 
public void testGetInstance() { - MetricsMasterSourceFactory metricsMasterSourceFactory = CompatibilitySingletonFactory - .getInstance(MetricsMasterSourceFactory.class); + MetricsMasterSourceFactory metricsMasterSourceFactory = + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class); MetricsMasterSource masterSource = metricsMasterSourceFactory.create(null); assertTrue(masterSource instanceof MetricsMasterSourceImpl); - assertSame(metricsMasterSourceFactory, CompatibilitySingletonFactory.getInstance( - MetricsMasterSourceFactory.class)); + assertSame(metricsMasterSourceFactory, + CompatibilitySingletonFactory.getInstance(MetricsMasterSourceFactory.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java index 063071b43173..8a249e550892 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/metrics/TestBaseSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,9 +31,9 @@ import org.junit.experimental.categories.Category; /** - * Test of default BaseSource for hadoop 2 + * Test of default BaseSource for hadoop 2 */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestBaseSourceImpl { @ClassRule diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java index 56199f4d6117..9fcbd24f9ea4 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsTableWrapperStub.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
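The testGetInstance methods above pin down two behaviours of CompatibilitySingletonFactory: the object returned implements the requested interface, and repeated calls hand back the very same instance. A generic sketch of such a singleton-per-interface lookup, using ServiceLoader plus a cache; this illustrates the contract the tests assert, not a reproduction of HBase's actual factory:

    import java.util.ServiceLoader;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    final class SingletonPerInterface {
      private static final ConcurrentMap<Class<?>, Object> INSTANCES = new ConcurrentHashMap<>();

      /** Resolve one implementation of {@code iface} via ServiceLoader and cache it forever. */
      static <T> T getInstance(Class<T> iface) {
        Object instance = INSTANCES.computeIfAbsent(iface, clazz -> {
          for (Object impl : ServiceLoader.load(clazz)) {
            return impl; // first registered implementation wins
          }
          throw new RuntimeException("No implementation registered for " + clazz.getName());
        });
        return iface.cast(instance);
      }

      private SingletonPerInterface() { }
    }

With such a cache, assertSame(getInstance(X.class), getInstance(X.class)) holds trivially, which is exactly what the tests check for the metrics source factories.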
*/ - package org.apache.hadoop.hbase.regionserver; import java.util.HashMap; diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java index 86a94baf72fd..b67162fdf99b 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServerSourceImpl.java @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionServerSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -38,14 +38,12 @@ public class TestMetricsRegionServerSourceImpl { public void testGetInstance() { MetricsRegionServerSourceFactory metricsRegionServerSourceFactory = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsRegionServerSource serverSource = - metricsRegionServerSourceFactory.createServer(null); + MetricsRegionServerSource serverSource = metricsRegionServerSourceFactory.createServer(null); assertTrue(serverSource instanceof MetricsRegionServerSourceImpl); assertSame(metricsRegionServerSourceFactory, - CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)); } - @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() { // This should throw an exception because MetricsRegionServerSourceImpl should only diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java index a802e8321c3e..bd6ea26be8e8 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java @@ -22,7 +22,6 @@ import java.util.HashMap; import java.util.Map; - import org.apache.hadoop.hbase.CompatibilitySingletonFactory; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; @@ -31,7 +30,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRegionSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -40,8 +39,8 @@ public class TestMetricsRegionSourceImpl { @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() { - MetricsRegionServerSourceFactory fact = CompatibilitySingletonFactory.getInstance( - MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsRegionSource one = fact.createRegion(new RegionWrapperStub("TEST")); MetricsRegionSource oneClone = fact.createRegion(new RegionWrapperStub("TEST")); diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java 
b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java index 11177edcafb3..b6e8b17d7dcf 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsTableSourceImpl.java @@ -30,9 +30,9 @@ import org.junit.experimental.categories.Category; /** - * Test for MetricsTableSourceImpl + * Test for MetricsTableSourceImpl */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsTableSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -44,13 +44,12 @@ public void testCompareToHashCode() throws Exception { MetricsRegionServerSourceFactory metricsFact = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); - MetricsTableSource one = metricsFact.createTable( - "ONETABLE", new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource oneClone = metricsFact.createTable( - "ONETABLE", - new MetricsTableWrapperStub("ONETABLE")); - MetricsTableSource two = metricsFact.createTable( - "TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); + MetricsTableSource one = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource oneClone = + metricsFact.createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + MetricsTableSource two = + metricsFact.createTable("TWOTABLE", new MetricsTableWrapperStub("TWOTABLE")); assertEquals(0, one.compareTo(oneClone)); assertEquals(one.hashCode(), oneClone.hashCode()); @@ -73,7 +72,7 @@ public void testNoGetTableMetricsSourceImpl() { public void testGetTableMetrics() { MetricsTableSource oneTbl = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class) - .createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); + .createTable("ONETABLE", new MetricsTableWrapperStub("ONETABLE")); assertEquals("ONETABLE", oneTbl.getTableName()); } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java index 8a72961edadc..e770c840eb17 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsUserSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
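testCompareToHashCode above exercises the usual consistency contract for the table metrics sources: two sources created for the same table compare as equal and share a hash code, while a source for a different table does not. The same shape of test against a hypothetical value class keyed by table name (JUnit 4, matching the style of the tests in this module):

    import static org.junit.Assert.assertEquals;
    import static org.junit.Assert.assertTrue;

    import java.util.Objects;
    import org.junit.Test;

    public class TestTableKeyConsistency {

      /** Minimal stand-in keyed by table name, mirroring how the metrics sources compare. */
      static final class TableKey implements Comparable<TableKey> {
        private final String tableName;

        TableKey(String tableName) { this.tableName = tableName; }

        @Override public int compareTo(TableKey other) { return tableName.compareTo(other.tableName); }

        @Override public boolean equals(Object o) {
          return o instanceof TableKey && compareTo((TableKey) o) == 0;
        }

        @Override public int hashCode() { return Objects.hash(tableName); }
      }

      @Test
      public void testCompareToHashCode() {
        TableKey one = new TableKey("ONETABLE");
        TableKey oneClone = new TableKey("ONETABLE");
        TableKey two = new TableKey("TWOTABLE");

        assertEquals(0, one.compareTo(oneClone));        // same table: equal ordering
        assertEquals(one.hashCode(), oneClone.hashCode());
        assertTrue(one.compareTo(two) != 0);             // different table: different ordering
      }
    }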
*/ - package org.apache.hadoop.hbase.regionserver; import static org.junit.Assert.assertEquals; @@ -30,7 +29,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsUserSourceImpl { @ClassRule @@ -40,8 +39,8 @@ public class TestMetricsUserSourceImpl { @SuppressWarnings("SelfComparison") @Test public void testCompareToHashCodeEquals() throws Exception { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); MetricsUserSource oneClone = fact.createUser("ONE"); @@ -57,8 +56,7 @@ public void testCompareToHashCodeEquals() throws Exception { assertTrue(two.compareTo(two) == 0); } - - @Test (expected = RuntimeException.class) + @Test(expected = RuntimeException.class) public void testNoGetRegionServerMetricsSourceImpl() throws Exception { // This should throw an exception because MetricsUserSourceImpl should only // be created by a factory. @@ -67,8 +65,8 @@ public void testNoGetRegionServerMetricsSourceImpl() throws Exception { @Test public void testGetUser() { - MetricsRegionServerSourceFactory fact - = CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); + MetricsRegionServerSourceFactory fact = + CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class); MetricsUserSource one = fact.createUser("ONE"); assertEquals("ONE", one.getUser()); diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java index d8ec0af92bb0..e6c785ac9500 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/wal/TestMetricsWALSourceImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsWALSourceImpl { @ClassRule @@ -37,10 +37,8 @@ public class TestMetricsWALSourceImpl { @Test public void testGetInstance() throws Exception { - MetricsWALSource walSource = - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); + MetricsWALSource walSource = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class); assertTrue(walSource instanceof MetricsWALSourceImpl); - assertSame(walSource, - CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); + assertSame(walSource, CompatibilitySingletonFactory.getInstance(MetricsWALSource.class)); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java index 6cc26e2a4dd1..3b2403c23d20 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceFactoryImpl.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -35,8 +35,8 @@ public class TestMetricsReplicationSourceFactoryImpl { @Test public void testGetInstance() { - MetricsReplicationSourceFactory rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSourceFactory.class); + MetricsReplicationSourceFactory rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSourceFactory.class); assertTrue(rms instanceof MetricsReplicationSourceFactoryImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java index faff4b389176..b29280166964 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/replication/regionserver/TestMetricsReplicationSourceImpl.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsReplicationSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -35,8 +35,8 @@ public class TestMetricsReplicationSourceImpl { @Test public void testGetInstance() throws Exception { - MetricsReplicationSource rms = CompatibilitySingletonFactory - .getInstance(MetricsReplicationSource.class); + MetricsReplicationSource rms = + CompatibilitySingletonFactory.getInstance(MetricsReplicationSource.class); assertTrue(rms instanceof MetricsReplicationSourceImpl); } } diff --git 
a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java index 2ac7996485e4..950d8ba2bcb5 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/rest/TestMetricsRESTSourceImpl.java @@ -31,7 +31,7 @@ /** * Test for hadoop 2's version of {@link MetricsRESTSource}. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsRESTSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -40,7 +40,7 @@ public class TestMetricsRESTSourceImpl { @Test public void ensureCompatRegistered() { assertNotNull(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class) - instanceof MetricsRESTSourceImpl); + assertTrue(CompatibilitySingletonFactory + .getInstance(MetricsRESTSource.class) instanceof MetricsRESTSourceImpl); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java index 83e25a636f07..7c7357c4f049 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/test/MetricsAssertHelperImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.test; import static org.junit.Assert.assertEquals; @@ -25,7 +24,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; - import org.apache.hadoop.hbase.metrics.BaseSource; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsCollector; @@ -36,7 +34,7 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; /** - * A helper class that will allow tests to get into hadoop2's metrics2 values. + * A helper class that will allow tests to get into hadoop2's metrics2 values. 
*/ public class MetricsAssertHelperImpl implements MetricsAssertHelper { private Map tags = new HashMap<>(); @@ -203,8 +201,8 @@ public void assertCounterLt(String name, long expected, BaseSource source) { public long getCounter(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get counter "+cName + " but did not",counters.get(cName)); - return counters.get(cName).longValue(); + assertNotNull("Should get counter " + cName + " but did not", counters.get(cName)); + return counters.get(cName).longValue(); } @Override @@ -225,8 +223,8 @@ public boolean checkGaugeExists(String name, BaseSource source) { public double getGaugeDouble(String name, BaseSource source) { getMetrics(source); String cName = canonicalizeMetricName(name); - assertNotNull("Should get gauge "+cName + " but did not",gauges.get(cName)); - return gauges.get(cName).doubleValue(); + assertNotNull("Should get gauge " + cName + " but did not", gauges.get(cName)); + return gauges.get(cName).doubleValue(); } @Override diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java index 7206810ab138..295d6f800f59 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/thrift/TestMetricsThriftServerSourceFactoryImpl.java @@ -32,7 +32,7 @@ /** * Test for hadoop 2's version of MetricsThriftServerSourceFactory. */ -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsThriftServerSourceFactoryImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -40,24 +40,24 @@ public class TestMetricsThriftServerSourceFactoryImpl { @Test public void testCompatabilityRegistered() { - assertNotNull(CompatibilitySingletonFactory.getInstance( - MetricsThriftServerSourceFactory.class)); - assertTrue(CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class) - instanceof MetricsThriftServerSourceFactoryImpl); + assertNotNull( + CompatibilitySingletonFactory.getInstance(MetricsThriftServerSourceFactory.class)); + assertTrue(CompatibilitySingletonFactory.getInstance( + MetricsThriftServerSourceFactory.class) instanceof MetricsThriftServerSourceFactoryImpl); } @Test public void testCreateThriftOneSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftOneSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftOneSource()); } @Test public void testCreateThriftTwoSource() { - //Make sure that the factory gives back a singleton. + // Make sure that the factory gives back a singleton. 
assertSame(new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource(), - new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); + new MetricsThriftServerSourceFactoryImpl().createThriftTwoSource()); } } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java index a199a78938a8..adb86b87facc 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/zookeeper/TestMetricsZooKeeperSourceImpl.java @@ -28,7 +28,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MetricsTests.class, SmallTests.class}) +@Category({ MetricsTests.class, SmallTests.class }) public class TestMetricsZooKeeperSourceImpl { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -37,7 +37,7 @@ public class TestMetricsZooKeeperSourceImpl { @Test public void testGetInstance() { MetricsZooKeeperSource zkSource = - CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); + CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class); assertTrue(zkSource instanceof MetricsZooKeeperSourceImpl); assertSame(zkSource, CompatibilitySingletonFactory.getInstance(MetricsZooKeeperSource.class)); } diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java index 1cf8702b7b12..96fe04b82e17 100644 --- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java +++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/metrics2/lib/TestMutableRangeHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,11 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.metrics2.lib; import static org.junit.Assert.assertEquals; +import java.util.ArrayList; +import java.util.List; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MetricsTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -30,15 +31,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.util.ArrayList; -import java.util.List; - @Category({ MetricsTests.class, SmallTests.class }) public class TestMutableRangeHistogram { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMutableRangeHistogram.class); + HBaseClassTestRule.forClass(TestMutableRangeHistogram.class); private static final String RECORD_NAME = "test"; private static final String SIZE_HISTOGRAM_NAME = "TestSize"; diff --git a/hbase-hbtop/pom.xml b/hbase-hbtop/pom.xml index 3559382fb918..e0b0d6cc2bdf 100644 --- a/hbase-hbtop/pom.xml +++ b/hbase-hbtop/pom.xml @@ -1,7 +1,5 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-hbtop Apache HBase - HBTop A real-time monitoring tool for HBase like Unix's top command - - - - - org.apache.maven.plugins - maven-source-plugin - - - org.apache.hbase @@ -107,4 +96,13 @@ test + + + + + org.apache.maven.plugins + maven-source-plugin + + + diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java index 9c1a000831a2..c725fe02859a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/HBTop.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,6 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; import org.apache.hbase.thirdparty.org.apache.commons.cli.Options; - /** * A real-time monitoring tool for HBase like Unix top command. 
*/ @@ -154,7 +153,7 @@ public int run(String[] args) throws Exception { } Optional fieldInfo = initialMode.getFieldInfos().stream() - .filter(f -> f.getField().getHeader().equals(field)).findFirst(); + .filter(f -> f.getField().getHeader().equals(field)).findFirst(); if (fieldInfo.isPresent()) { initialSortField = fieldInfo.get().getField(); initialAscendingSort = ascendingSort; @@ -168,7 +167,7 @@ public int run(String[] args) throws Exception { initialFields = new ArrayList<>(); for (String field : fields) { Optional fieldInfo = initialMode.getFieldInfos().stream() - .filter(f -> f.getField().getHeader().equals(field)).findFirst(); + .filter(f -> f.getField().getHeader().equals(field)).findFirst(); if (fieldInfo.isPresent()) { initialFields.add(fieldInfo.get().getField()); } else { @@ -180,7 +179,7 @@ public int run(String[] args) throws Exception { if (commandLine.hasOption("filters")) { String[] filters = commandLine.getOptionValue("filters").split(","); List fields = initialMode.getFieldInfos().stream().map(FieldInfo::getField) - .collect(Collectors.toList()); + .collect(Collectors.toList()); for (String filter : filters) { RecordFilter f = RecordFilter.parse(filter, fields, false); if (f != null) { @@ -203,7 +202,7 @@ public int run(String[] args) throws Exception { } try (Screen screen = new Screen(getConf(), initialRefreshDelay, initialMode, initialFields, - initialSortField, initialAscendingSort, initialFilters, numberOfIterations, batchMode)) { + initialSortField, initialAscendingSort, initialFilters, numberOfIterations, batchMode)) { screen.run(); } @@ -212,19 +211,16 @@ public int run(String[] args) throws Exception { private Options getOptions() { Options opts = new Options(); - opts.addOption("h", "help", false, - "Print usage; for help while the tool is running press 'h'"); - opts.addOption("d", "delay", true, - "The refresh delay (in seconds); default is 3 seconds"); + opts.addOption("h", "help", false, "Print usage; for help while the tool is running press 'h'"); + opts.addOption("d", "delay", true, "The refresh delay (in seconds); default is 3 seconds"); opts.addOption("m", "mode", true, "The mode; n (Namespace)|t (Table)|r (Region)|s (RegionServer)|u (User)" - + "|c (Client), default is r"); - opts.addOption("n", "numberOfIterations", true, - "The number of iterations"); + + "|c (Client), default is r"); + opts.addOption("n", "numberOfIterations", true, "The number of iterations"); opts.addOption("s", "sortField", true, "The initial sort field. You can prepend a `+' or `-' to the field name to also override" - + " the sort direction. A leading `+' will force sorting high to low, whereas a `-' will" - + " ensure a low to high ordering"); + + " the sort direction. A leading `+' will force sorting high to low, whereas a `-' will" + + " ensure a low to high ordering"); opts.addOption("O", "outputFieldNames", false, "Print each of the available field names on a separate line, then quit"); opts.addOption("f", "fields", true, @@ -233,8 +229,8 @@ private Options getOptions() { "The initial filters. Specify comma separated filters to set multiple filters"); opts.addOption("b", "batchMode", false, "Starts hbtop in Batch mode, which could be useful for sending output from hbtop to other" - + " programs or to a file. In this mode, hbtop will not accept input and runs until the" - + " iterations limit you've set with the `-n' command-line option or until killed"); + + " programs or to a file. 
In this mode, hbtop will not accept input and runs until the" + + " iterations limit you've set with the `-n' command-line option or until killed"); return opts; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java index 577172a38cb2..3331cd03550f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/Record.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,7 +94,8 @@ public static Record ofEntries(Entry... entries) { } public static Record ofEntries(Stream entries) { - return entries.collect(Record::builder, Builder::put, (r1, r2) -> {}).build(); + return entries.collect(Record::builder, Builder::put, (r1, r2) -> { + }).build(); } private Record(ImmutableMap values) { @@ -165,12 +166,11 @@ public Set> entrySet() { } public Record combine(Record o) { - return ofEntries(values.keySet().stream() - .map(k -> { - if (k.getFieldValueType() == FieldValueType.STRING) { - return entry(k, values.get(k)); - } - return entry(k, values.get(k).plus(o.values.get(k))); - })); + return ofEntries(values.keySet().stream().map(k -> { + if (k.getFieldValueType() == FieldValueType.STRING) { + return entry(k, values.get(k)); + } + return entry(k, values.get(k).plus(o.values.get(k))); + })); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java index 78adf7cce009..c6d4399b8322 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/RecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldValue; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a filter that's filtering the metric {@link Record}s. 
*/ @@ -32,11 +31,7 @@ public final class RecordFilter { private enum Operator { - EQUAL("="), - DOUBLE_EQUALS("=="), - GREATER(">"), - GREATER_OR_EQUAL(">="), - LESS("<"), + EQUAL("="), DOUBLE_EQUALS("=="), GREATER(">"), GREATER_OR_EQUAL(">="), LESS("<"), LESS_OR_EQUAL("<="); private final String operator; @@ -68,7 +63,7 @@ public static RecordFilter parse(String filterString, List fields, boolea StringBuilder fieldString = new StringBuilder(); while (filterString.length() > index && filterString.charAt(index) != '<' - && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') { + && filterString.charAt(index) != '>' && filterString.charAt(index) != '=') { fieldString.append(filterString.charAt(index++)); } @@ -82,8 +77,8 @@ public static RecordFilter parse(String filterString, List fields, boolea } StringBuilder operatorString = new StringBuilder(); - while (filterString.length() > index && (filterString.charAt(index) == '<' || - filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) { + while (filterString.length() > index && (filterString.charAt(index) == '<' + || filterString.charAt(index) == '>' || filterString.charAt(index) == '=')) { operatorString.append(filterString.charAt(index++)); } @@ -138,7 +133,7 @@ private static Operator getOperator(String operatorString) { private final FieldValue value; private RecordFilter(boolean ignoreCase, boolean not, Field field, Operator operator, - FieldValue value) { + FieldValue value) { this.ignoreCase = ignoreCase; this.not = not; this.field = Objects.requireNonNull(field); @@ -166,8 +161,7 @@ public boolean execute(Record record) { return not != ret; } - int compare = ignoreCase ? - fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); + int compare = ignoreCase ? fieldValue.compareToIgnoreCase(value) : fieldValue.compareTo(value); boolean ret; switch (operator) { @@ -212,7 +206,7 @@ public boolean equals(Object o) { } RecordFilter filter = (RecordFilter) o; return ignoreCase == filter.ignoreCase && not == filter.not && field == filter.field - && operator == filter.operator && value.equals(filter.value); + && operator == filter.operator && value.equals(filter.value); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java index df460dd31cf2..8874bc6853e0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/Field.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents fields that are displayed in the top screen. 
*/ @@ -34,27 +33,25 @@ public enum Field { REGION("REGION", "Encoded Region Name", false, true, FieldValueType.STRING), REGION_SERVER("RS", "Short Region Server Name", true, true, FieldValueType.STRING), LONG_REGION_SERVER("LRS", "Long Region Server Name", true, true, FieldValueType.STRING), - REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, - FieldValueType.LONG), + REQUEST_COUNT_PER_SECOND("#REQ/S", "Request Count per second", false, false, FieldValueType.LONG), READ_REQUEST_COUNT_PER_SECOND("#READ/S", "Read Request Count per second", false, false, - FieldValueType.LONG), + FieldValueType.LONG), FILTERED_READ_REQUEST_COUNT_PER_SECOND("#FREAD/S", "Filtered Read Request Count per second", - false, false, FieldValueType.LONG), + false, false, FieldValueType.LONG), WRITE_REQUEST_COUNT_PER_SECOND("#WRITE/S", "Write Request Count per second", false, false, - FieldValueType.LONG), + FieldValueType.LONG), STORE_FILE_SIZE("SF", "StoreFile Size", false, false, FieldValueType.SIZE), UNCOMPRESSED_STORE_FILE_SIZE("USF", "Uncompressed StoreFile Size", false, false, - FieldValueType.SIZE), + FieldValueType.SIZE), NUM_STORE_FILES("#SF", "Number of StoreFiles", false, false, FieldValueType.INTEGER), MEM_STORE_SIZE("MEMSTORE", "MemStore Size", false, false, FieldValueType.SIZE), LOCALITY("LOCALITY", "Block Locality", false, false, FieldValueType.FLOAT), START_KEY("SKEY", "Start Key", true, true, FieldValueType.STRING), - COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, - FieldValueType.LONG), + COMPACTING_CELL_COUNT("#COMPingCELL", "Compacting Cell Count", false, false, FieldValueType.LONG), COMPACTED_CELL_COUNT("#COMPedCELL", "Compacted Cell Count", false, false, FieldValueType.LONG), COMPACTION_PROGRESS("%COMP", "Compaction Progress", false, false, FieldValueType.PERCENT), LAST_MAJOR_COMPACTION_TIME("LASTMCOMP", "Last Major Compaction Time", false, true, - FieldValueType.STRING), + FieldValueType.STRING), REGION_COUNT("#REGION", "Region Count", false, false, FieldValueType.INTEGER), USED_HEAP_SIZE("UHEAP", "Used Heap Size", false, false, FieldValueType.SIZE), USER("USER", "user Name", true, true, FieldValueType.STRING), @@ -70,7 +67,7 @@ public enum Field { private final FieldValueType fieldValueType; Field(String header, String description, boolean autoAdjust, boolean leftJustify, - FieldValueType fieldValueType) { + FieldValueType fieldValueType) { this.header = Objects.requireNonNull(header); this.description = Objects.requireNonNull(description); this.autoAdjust = autoAdjust; diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java index 3f0e5f7ad1d3..ad153210dd9e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,14 +20,11 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about a field. - * - * This has a {@link Field} itself and additional information (e.g. {@code defaultLength} and - * {@code displayByDefault}). This additional information is different between the - * {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. 
That's why the - * additional information is separated from {@link Field}. + * Information about a field. This has a {@link Field} itself and additional information (e.g. + * {@code defaultLength} and {@code displayByDefault}). This additional information is different + * between the {@link org.apache.hadoop.hbase.hbtop.mode.Mode}s even when the field is the same. + * That's why the additional information is separated from {@link Field}. */ @InterfaceAudience.Private public class FieldInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java index 086dadc3e290..43fbf6498384 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,11 +22,8 @@ import org.apache.hadoop.hbase.Size; import org.apache.yetus.audience.InterfaceAudience; - /** - * Represents a value of a field. - * - * The type of a value is defined by {@link FieldValue}. + * Represents a value of a field. The type of a value is defined by {@link FieldValue}. */ @InterfaceAudience.Private public final class FieldValue implements Comparable { @@ -103,23 +100,23 @@ public final class FieldValue implements Comparable { private Size optimizeSize(Size size) { if (size.get(Size.Unit.BYTE) < 1024d) { - return size.getUnit() == Size.Unit.BYTE ? - size : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); + return size.getUnit() == Size.Unit.BYTE ? size + : new Size(size.get(Size.Unit.BYTE), Size.Unit.BYTE); } else if (size.get(Size.Unit.KILOBYTE) < 1024d) { - return size.getUnit() == Size.Unit.KILOBYTE ? - size : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); + return size.getUnit() == Size.Unit.KILOBYTE ? size + : new Size(size.get(Size.Unit.KILOBYTE), Size.Unit.KILOBYTE); } else if (size.get(Size.Unit.MEGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.MEGABYTE ? - size : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); + return size.getUnit() == Size.Unit.MEGABYTE ? size + : new Size(size.get(Size.Unit.MEGABYTE), Size.Unit.MEGABYTE); } else if (size.get(Size.Unit.GIGABYTE) < 1024d) { - return size.getUnit() == Size.Unit.GIGABYTE ? - size : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); + return size.getUnit() == Size.Unit.GIGABYTE ? size + : new Size(size.get(Size.Unit.GIGABYTE), Size.Unit.GIGABYTE); } else if (size.get(Size.Unit.TERABYTE) < 1024d) { - return size.getUnit() == Size.Unit.TERABYTE ? - size : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); + return size.getUnit() == Size.Unit.TERABYTE ? size + : new Size(size.get(Size.Unit.TERABYTE), Size.Unit.TERABYTE); } - return size.getUnit() == Size.Unit.PETABYTE ? - size : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); + return size.getUnit() == Size.Unit.PETABYTE ? 
size + : new Size(size.get(Size.Unit.PETABYTE), Size.Unit.PETABYTE); } private Size parseSizeString(String sizeString) { @@ -133,7 +130,7 @@ private Size parseSizeString(String sizeString) { } private Size.Unit convertToUnit(String unitSimpleName) { - for (Size.Unit unit: Size.Unit.values()) { + for (Size.Unit unit : Size.Unit.values()) { if (unitSimpleName.equals(unit.getSimpleName())) { return unit; } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java index e2edae87b800..63597da584b9 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/field/FieldValueType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the type of a {@link FieldValue}. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java index fe3edd1b2544..9b8f9a518d37 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ClientModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.UserMetrics; @@ -41,87 +40,88 @@ /** * Implementation for {@link ModeStrategy} for client Mode. 
*/ -@InterfaceAudience.Private public final class ClientModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class ClientModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.CLIENT, 0, true), - new FieldInfo(Field.USER_COUNT, 5, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.CLIENT, 0, true), new FieldInfo(Field.USER_COUNT, 5, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final Map requestCountPerSecondMap = new HashMap<>(); ClientModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { List records = createRecords(clusterMetrics); return aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, - Field.USER_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.CLIENT, Field.USER, + Field.USER_COUNT); } List createRecords(ClusterMetrics clusterMetrics) { List ret = new ArrayList<>(); for (ServerMetrics serverMetrics : clusterMetrics.getLiveServerMetrics().values()) { long lastReportTimestamp = serverMetrics.getLastReportTimestamp(); - serverMetrics.getUserMetrics().values().forEach(um -> um.getClientMetrics().values().forEach( - clientMetrics -> ret.add( - createRecord(um.getNameAsString(), clientMetrics, lastReportTimestamp, - serverMetrics.getServerName().getServerName())))); + serverMetrics.getUserMetrics().values() + .forEach(um -> um.getClientMetrics().values() + .forEach(clientMetrics -> ret.add(createRecord(um.getNameAsString(), clientMetrics, + lastReportTimestamp, serverMetrics.getServerName().getServerName())))); } return ret; } /** * Aggregate the records and count the unique values for the given distinctField - * - * @param records records to be processed - * @param groupBy Field on which group by needs to be done - * @param distinctField Field whose unique values needs to be counted + * @param records records to be processed + * @param groupBy Field on which group by needs to be done + * @param distinctField Field whose unique values needs to be counted * @param uniqueCountAssignedTo a target field to which the unique count is assigned to * @return aggregated records */ List aggregateRecordsAndAddDistinct(List records, Field groupBy, Field distinctField, Field uniqueCountAssignedTo) { List result = new ArrayList<>(); - records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values() - .forEach(val -> { - Set distinctValues = new HashSet<>(); - Map map = new HashMap<>(); - for (Record record : val) { - for (Map.Entry field : record.entrySet()) { - if (distinctField.equals(field.getKey())) { - //We will not be adding the 
field in the new record whose distinct count is required - distinctValues.add(record.get(distinctField)); + records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).values().forEach(val -> { + Set distinctValues = new HashSet<>(); + Map map = new HashMap<>(); + for (Record record : val) { + for (Map.Entry field : record.entrySet()) { + if (distinctField.equals(field.getKey())) { + // We will not be adding the field in the new record whose distinct count is required + distinctValues.add(record.get(distinctField)); + } else { + if (field.getKey().getFieldValueType() == FieldValueType.STRING) { + map.put(field.getKey(), field.getValue()); + } else { + if (map.get(field.getKey()) == null) { + map.put(field.getKey(), field.getValue()); } else { - if (field.getKey().getFieldValueType() == FieldValueType.STRING) { - map.put(field.getKey(), field.getValue()); - } else { - if (map.get(field.getKey()) == null) { - map.put(field.getKey(), field.getValue()); - } else { - map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); - } - } + map.put(field.getKey(), map.get(field.getKey()).plus(field.getValue())); } } } - // Add unique count field - map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); - result.add(Record.ofEntries(map.entrySet().stream() - .map(k -> Record.entry(k.getKey(), k.getValue())))); - }); + } + } + // Add unique count field + map.put(uniqueCountAssignedTo, uniqueCountAssignedTo.newValue(distinctValues.size())); + result.add( + Record.ofEntries(map.entrySet().stream().map(k -> Record.entry(k.getKey(), k.getValue())))); + }); return result; } @@ -137,21 +137,22 @@ Record createRecord(String user, UserMetrics.ClientMetrics clientMetrics, requestCountPerSecondMap.put(mapKey, requestCountPerSecond); } requestCountPerSecond.refresh(lastReportTimestamp, clientMetrics.getReadRequestsCount(), - clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); + clientMetrics.getFilteredReadRequestsCount(), clientMetrics.getWriteRequestsCount()); builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getReadRequestCountPerSecond()); + requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getWriteRequestCountPerSecond()); + requestCountPerSecond.getWriteRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.USER, user); return builder.build(); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { + @Override + public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); + RecordFilter.newBuilder(Field.CLIENT).doubleEquals(selectedRecord.get(Field.CLIENT))); return new DrillDownInfo(Mode.USER, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java index de3d582fb9f1..7061d5374e88 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/DrillDownInfo.java @@ -1,4 +1,4 @@ 
-/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,15 +21,12 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.yetus.audience.InterfaceAudience; - /** - * Information about drilling down. - * - * When drilling down, going to next {@link Mode} with initial {@link RecordFilter}s. + * Information about drilling down. When drilling down, going to next {@link Mode} with initial + * {@link RecordFilter}s. */ @InterfaceAudience.Private public class DrillDownInfo { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java index ffd98dfd6837..58ed8428c18e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/Mode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,7 +27,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a display mode in the top screen. */ @@ -45,7 +44,7 @@ public enum Mode { private final ModeStrategy modeStrategy; Mode(String header, String description, ModeStrategy modeStrategy) { - this.header = Objects.requireNonNull(header); + this.header = Objects.requireNonNull(header); this.description = Objects.requireNonNull(description); this.modeStrategy = Objects.requireNonNull(modeStrategy); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java index 021cee25810a..db58f1facae5 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,14 +26,17 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for strategy logic for {@link Mode}. */ @InterfaceAudience.Private interface ModeStrategy { List getFieldInfos(); + Field getDefaultSortField(); + List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters); - @Nullable DrillDownInfo drillDown(Record selectedRecord); + + @Nullable + DrillDownInfo drillDown(Record selectedRecord); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java index 9175820e0cae..b426c0b949b9 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/ModeStrategyUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.List; import java.util.stream.Collectors; import java.util.stream.Stream; - import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; import org.apache.hadoop.hbase.hbtop.field.Field; @@ -36,8 +35,7 @@ private ModeStrategyUtils() { * @param filters List of filters * @return filtered records */ - public static List applyFilterAndGet(List records, - List filters) { + public static List applyFilterAndGet(List records, List filters) { if (filters != null && !filters.isEmpty()) { return records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) .collect(Collectors.toList()); @@ -45,19 +43,18 @@ public static List applyFilterAndGet(List records, return records; } - /** - * Group by records on the basis of supplied groupBy field and - * Aggregate records using {@link Record#combine(Record)} - * + * Group by records on the basis of supplied groupBy field and Aggregate records using + * {@link Record#combine(Record)} * @param records records needs to be processed * @param groupBy Field to be used for group by * @return aggregated records */ public static List aggregateRecords(List records, Field groupBy) { return records.stream().collect(Collectors.groupingBy(r -> r.get(groupBy))).entrySet().stream() - .flatMap(e -> e.getValue().stream().reduce(Record::combine).map(Stream::of) - .orElse(Stream.empty())).collect(Collectors.toList()); + .flatMap( + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) + .collect(Collectors.toList()); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java index f74d8bf22ebc..7be2518fe362 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/NamespaceModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -28,15 +27,13 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Namespace Mode. 
*/ @InterfaceAudience.Private public final class NamespaceModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), @@ -44,13 +41,11 @@ public final class NamespaceModeStrategy implements ModeStrategy { new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - NamespaceModeStrategy(){ + NamespaceModeStrategy() { } @Override @@ -63,11 +58,12 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field return ModeStrategyUtils.aggregateRecords(records, Field.NAMESPACE); @@ -75,9 +71,8 @@ public Field getDefaultSortField() { @Override public DrillDownInfo drillDown(Record selectedRecord) { - List initialFilters = - Collections.singletonList(RecordFilter.newBuilder(Field.NAMESPACE) - .doubleEquals(selectedRecord.get(Field.NAMESPACE))); + List initialFilters = Collections.singletonList( + RecordFilter.newBuilder(Field.NAMESPACE).doubleEquals(selectedRecord.get(Field.NAMESPACE))); return new DrillDownInfo(Mode.TABLE, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java index 0adbc823bf4c..ff8802b4dbbc 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.RegionMetrics; @@ -39,7 +38,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Region Mode. 
*/ @@ -47,29 +45,22 @@ public final class RegionModeStrategy implements ModeStrategy { private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_NAME, 0, false), - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.START_CODE, 13, false), - new FieldInfo(Field.REPLICA_ID, 5, false), - new FieldInfo(Field.REGION, 32, true), - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), + new FieldInfo(Field.REGION_NAME, 0, false), new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.START_CODE, 13, false), + new FieldInfo(Field.REPLICA_ID, 5, false), new FieldInfo(Field.REGION, 32, true), + new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.STORE_FILE_SIZE, 10, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 12, false), - new FieldInfo(Field.NUM_STORE_FILES,4, true), - new FieldInfo(Field.MEM_STORE_SIZE, 8, true), - new FieldInfo(Field.LOCALITY, 8, true), - new FieldInfo(Field.START_KEY, 0, false), + new FieldInfo(Field.NUM_STORE_FILES, 4, true), new FieldInfo(Field.MEM_STORE_SIZE, 8, true), + new FieldInfo(Field.LOCALITY, 8, true), new FieldInfo(Field.START_KEY, 0, false), new FieldInfo(Field.COMPACTING_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTED_CELL_COUNT, 12, false), new FieldInfo(Field.COMPACTION_PROGRESS, 7, false), - new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false) - ); + new FieldInfo(Field.LAST_MAJOR_COMPACTION_TIME, 19, false)); private final Map requestCountPerSecondMap = new HashMap<>(); @@ -86,7 +77,8 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { List ret = new ArrayList<>(); for (ServerMetrics sm : clusterMetrics.getLiveServerMetrics().values()) { @@ -99,7 +91,7 @@ public Field getDefaultSortField() { } private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMetrics, - long lastReportTimestamp) { + long lastReportTimestamp) { Record.Builder builder = Record.builder(); @@ -119,8 +111,8 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet tableName = tn.getQualifierAsString(); startKey = Bytes.toStringBinary(elements[1]); startCode = Bytes.toString(elements[2]); - replicaId = elements.length == 4 ? - Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; + replicaId = + elements.length == 4 ? 
Integer.valueOf(Bytes.toString(elements[3])).toString() : ""; region = RegionInfo.encodeRegionName(regionMetrics.getRegionName()); } catch (IOException ignored) { } @@ -145,11 +137,10 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet builder.put(Field.READ_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getReadRequestCountPerSecond()); builder.put(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getFilteredReadRequestCountPerSecond()); + requestCountPerSecond.getFilteredReadRequestCountPerSecond()); builder.put(Field.WRITE_REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getWriteRequestCountPerSecond()); - builder.put(Field.REQUEST_COUNT_PER_SECOND, - requestCountPerSecond.getRequestCountPerSecond()); + builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond.getRequestCountPerSecond()); builder.put(Field.STORE_FILE_SIZE, regionMetrics.getStoreFileSize()); builder.put(Field.UNCOMPRESSED_STORE_FILE_SIZE, regionMetrics.getUncompressedStoreFileSize()); @@ -160,7 +151,7 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet long compactingCellCount = regionMetrics.getCompactingCellCount(); long compactedCellCount = regionMetrics.getCompactedCellCount(); float compactionProgress = 0; - if (compactedCellCount > 0) { + if (compactedCellCount > 0) { compactionProgress = 100 * ((float) compactedCellCount / compactingCellCount); } @@ -178,22 +169,20 @@ private Record createRecord(ServerMetrics serverMetrics, RegionMetrics regionMet } /** - * Form new record list with records formed by only fields provided through fieldInfo and - * add a count field for each record with value 1 - * We are doing two operation of selecting and adding new field - * because of saving some CPU cycles on rebuilding the record again - * + * Form new record list with records formed by only fields provided through fieldInfo and add a + * count field for each record with value 1 We are doing two operation of selecting and adding new + * field because of saving some CPU cycles on rebuilding the record again * @param fieldInfos List of FieldInfos required in the record - * @param records List of records which needs to be processed + * @param records List of records which needs to be processed * @param countField Field which needs to be added with value 1 for each record * @return records after selecting required fields and adding count field */ List selectModeFieldsAndAddCountField(List fieldInfos, List records, Field countField) { - return records.stream().map(record -> Record.ofEntries( - fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) - .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) + return records.stream().map( + record -> Record.ofEntries(fieldInfos.stream().filter(fi -> record.containsKey(fi.getField())) + .map(fi -> Record.entry(fi.getField(), record.get(fi.getField()))))) .map(record -> Record.builder().putAll(record).put(countField, 1).build()) .collect(Collectors.toList()); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java index 44a9a2c82711..1618bf3fc801 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RegionServerModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * 
or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.stream.Collectors; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.ServerMetrics; import org.apache.hadoop.hbase.hbtop.Record; @@ -32,7 +31,6 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for RegionServer Mode. */ @@ -40,8 +38,7 @@ public final class RegionServerModeStrategy implements ModeStrategy { private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.REGION_SERVER, 0, true), - new FieldInfo(Field.LONG_REGION_SERVER, 0, false), + new FieldInfo(Field.REGION_SERVER, 0, true), new FieldInfo(Field.LONG_REGION_SERVER, 0, false), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), @@ -49,15 +46,12 @@ public final class RegionServerModeStrategy implements ModeStrategy { new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true), - new FieldInfo(Field.USED_HEAP_SIZE, 11, true), - new FieldInfo(Field.MAX_HEAP_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true), + new FieldInfo(Field.USED_HEAP_SIZE, 11, true), new FieldInfo(Field.MAX_HEAP_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); - RegionServerModeStrategy(){ + RegionServerModeStrategy() { } @Override @@ -70,11 +64,12 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by LONG_REGION_SERVER field Map retMap = ModeStrategyUtils.aggregateRecords(records, Field.LONG_REGION_SERVER).stream() @@ -87,9 +82,9 @@ public Field getDefaultSortField() { continue; } - Record newRecord = Record.builder().putAll(record) - .put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) - .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); + Record newRecord = + Record.builder().putAll(record).put(Field.USED_HEAP_SIZE, sm.getUsedHeapSize()) + .put(Field.MAX_HEAP_SIZE, sm.getMaxHeapSize()).build(); retMap.put(sm.getServerName().getServerName(), newRecord); } @@ -100,8 +95,7 @@ public Field getDefaultSortField() { @Override public DrillDownInfo drillDown(Record selectedRecord) { List initialFilters = Collections.singletonList(RecordFilter - .newBuilder(Field.REGION_SERVER) - .doubleEquals(selectedRecord.get(Field.REGION_SERVER))); + .newBuilder(Field.REGION_SERVER).doubleEquals(selectedRecord.get(Field.REGION_SERVER))); return new DrillDownInfo(Mode.REGION, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java index d546070db71d..ade3f7d5e433 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/RequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for calculating request counts per second. */ @@ -34,7 +33,7 @@ public class RequestCountPerSecond { private long writeRequestCountPerSecond; public void refresh(long lastReportTimestamp, long readRequestCount, - long filteredReadRequestCount, long writeRequestCount) { + long filteredReadRequestCount, long writeRequestCount) { if (previousLastReportTimestamp == 0) { previousLastReportTimestamp = lastReportTimestamp; previousReadRequestCount = readRequestCount; @@ -47,7 +46,7 @@ public void refresh(long lastReportTimestamp, long readRequestCount, } readRequestCountPerSecond = (readRequestCount - previousReadRequestCount) / delta; filteredReadRequestCountPerSecond = - (filteredReadRequestCount - previousFilteredReadRequestCount) / delta; + (filteredReadRequestCount - previousFilteredReadRequestCount) / delta; writeRequestCountPerSecond = (writeRequestCount - previousWriteRequestCount) / delta; previousLastReportTimestamp = lastReportTimestamp; diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java index 4acc34412584..954401352e2d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/TableModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,26 +29,21 @@ import org.apache.hadoop.hbase.hbtop.field.FieldInfo; import org.apache.yetus.audience.InterfaceAudience; - /** * Implementation for {@link ModeStrategy} for Table Mode. 
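The RequestCountPerSecond hunk above is formatting-only, but the logic it touches — turning cumulative request counters reported at intervals into per-second rates by differencing two successive reports — can be sketched in isolation. A minimal standalone sketch (the class, field names and the divide-by-zero guard are illustrative assumptions, not the actual HBase implementation):

public final class RateSketch {
  private long previousTimestampMillis; // 0 means "no previous report yet"
  private long previousRequestCount;
  private long requestCountPerSecond;

  public void refresh(long reportTimestampMillis, long cumulativeRequestCount) {
    if (previousTimestampMillis == 0) {
      // First report: only record the baseline; a rate needs two samples.
      previousTimestampMillis = reportTimestampMillis;
      previousRequestCount = cumulativeRequestCount;
      return;
    }
    long deltaSeconds = (reportTimestampMillis - previousTimestampMillis) / 1000;
    if (deltaSeconds < 1) {
      deltaSeconds = 1; // assumed guard against division by zero for very close reports
    }
    requestCountPerSecond = (cumulativeRequestCount - previousRequestCount) / deltaSeconds;
    previousTimestampMillis = reportTimestampMillis;
    previousRequestCount = cumulativeRequestCount;
  }

  public long getRequestCountPerSecond() {
    return requestCountPerSecond;
  }

  public static void main(String[] args) {
    RateSketch sketch = new RateSketch();
    sketch.refresh(10_000, 1_000); // baseline only, rate stays 0
    sketch.refresh(13_000, 1_600); // 600 requests over 3 seconds
    System.out.println(sketch.getRequestCountPerSecond()); // prints 200
  }
}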
*/ @InterfaceAudience.Private public final class TableModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays.asList( - new FieldInfo(Field.NAMESPACE, 0, true), - new FieldInfo(Field.TABLE, 0, true), - new FieldInfo(Field.REGION_COUNT, 7, true), + private final List fieldInfos = Arrays.asList(new FieldInfo(Field.NAMESPACE, 0, true), + new FieldInfo(Field.TABLE, 0, true), new FieldInfo(Field.REGION_COUNT, 7, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 8, true), new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), new FieldInfo(Field.STORE_FILE_SIZE, 13, true), new FieldInfo(Field.UNCOMPRESSED_STORE_FILE_SIZE, 15, false), - new FieldInfo(Field.NUM_STORE_FILES, 7, true), - new FieldInfo(Field.MEM_STORE_SIZE, 11, true) - ); + new FieldInfo(Field.NUM_STORE_FILES, 7, true), new FieldInfo(Field.MEM_STORE_SIZE, 11, true)); private final RegionModeStrategy regionModeStrategy = new RegionModeStrategy(); @@ -65,26 +60,22 @@ public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { // Get records from RegionModeStrategy and add REGION_COUNT field List records = regionModeStrategy.selectModeFieldsAndAddCountField(fieldInfos, - regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); + regionModeStrategy.getRecords(clusterMetrics, pushDownFilters), Field.REGION_COUNT); // Aggregation by NAMESPACE field and TABLE field - return records.stream() - .collect(Collectors.groupingBy(r -> { - String namespace = r.get(Field.NAMESPACE).asString(); - String table = r.get(Field.TABLE).asString(); - return TableName.valueOf(namespace, table); - })) - .entrySet().stream() - .flatMap( - e -> e.getValue().stream() - .reduce(Record::combine) - .map(Stream::of) - .orElse(Stream.empty())) - .collect(Collectors.toList()); + return records.stream().collect(Collectors.groupingBy(r -> { + String namespace = r.get(Field.NAMESPACE).asString(); + String table = r.get(Field.TABLE).asString(); + return TableName.valueOf(namespace, table); + })).entrySet().stream() + .flatMap( + e -> e.getValue().stream().reduce(Record::combine).map(Stream::of).orElse(Stream.empty())) + .collect(Collectors.toList()); } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java index 605376e12218..bf57daf96b48 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/mode/UserModeStrategy.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; - import org.apache.hadoop.hbase.ClusterMetrics; import org.apache.hadoop.hbase.hbtop.Record; import org.apache.hadoop.hbase.hbtop.RecordFilter; @@ -31,40 +30,44 @@ /** * Implementation for {@link ModeStrategy} for User Mode. 
*/ -@InterfaceAudience.Private public final class UserModeStrategy implements ModeStrategy { +@InterfaceAudience.Private +public final class UserModeStrategy implements ModeStrategy { - private final List fieldInfos = Arrays - .asList(new FieldInfo(Field.USER, 0, true), - new FieldInfo(Field.CLIENT_COUNT, 7, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); + private final List fieldInfos = + Arrays.asList(new FieldInfo(Field.USER, 0, true), new FieldInfo(Field.CLIENT_COUNT, 7, true), + new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.READ_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.WRITE_REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND, 10, true)); private final ClientModeStrategy clientModeStrategy = new ClientModeStrategy(); UserModeStrategy() { } - @Override public List getFieldInfos() { + @Override + public List getFieldInfos() { return fieldInfos; } - @Override public Field getDefaultSortField() { + @Override + public Field getDefaultSortField() { return Field.REQUEST_COUNT_PER_SECOND; } - @Override public List getRecords(ClusterMetrics clusterMetrics, + @Override + public List getRecords(ClusterMetrics clusterMetrics, List pushDownFilters) { List records = clientModeStrategy.createRecords(clusterMetrics); return clientModeStrategy.aggregateRecordsAndAddDistinct( - ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, - Field.CLIENT_COUNT); + ModeStrategyUtils.applyFilterAndGet(records, pushDownFilters), Field.USER, Field.CLIENT, + Field.CLIENT_COUNT); } - @Override public DrillDownInfo drillDown(Record selectedRecord) { - //Drill down to client and using selected USER as a filter + @Override + public DrillDownInfo drillDown(Record selectedRecord) { + // Drill down to client and using selected USER as a filter List initialFilters = Collections.singletonList( - RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); + RecordFilter.newBuilder(Field.USER).doubleEquals(selectedRecord.get(Field.USER))); return new DrillDownInfo(Mode.CLIENT, initialFilters); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java index 8b55d6ec0df3..4620d0896c2c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/AbstractScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** * An abstract class for {@link ScreenView} that has the common useful methods and the default * implementations for the abstract methods. 
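TableModeStrategy and UserModeStrategy above both derive their rows by collapsing finer-grained records: group by a key (namespace plus table, or user) and then reduce each group with Record::combine. A minimal standalone sketch of that groupingBy-plus-reduce idiom, using a hypothetical plain-Java Row in place of hbtop's Record and FieldValue types:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

public final class AggregateSketch {

  static final class Row {
    final String table; // grouping key, e.g. "namespace:table"
    final long requestsPerSecond;

    Row(String table, long requestsPerSecond) {
      this.table = table;
      this.requestsPerSecond = requestsPerSecond;
    }

    Row combine(Row other) { // stand-in for Record::combine
      return new Row(table, requestsPerSecond + other.requestsPerSecond);
    }
  }

  public static void main(String[] args) {
    List<Row> perRegion = Arrays.asList(
      new Row("default:t1", 10), new Row("default:t1", 5), new Row("default:t2", 7));

    List<Row> perTable = perRegion.stream()
      .collect(Collectors.groupingBy(r -> r.table))            // Map<String, List<Row>>
      .entrySet().stream()
      .flatMap(e -> e.getValue().stream().reduce(Row::combine) // one combined Row per table
        .map(Stream::of).orElse(Stream.empty()))
      .collect(Collectors.toList());

    perTable.forEach(r -> System.out.println(r.table + " = " + r.requestsPerSecond));
  }
}

The Optional-to-Stream step (map(Stream::of).orElse(Stream.empty())) mirrors the reformatted flatMap in the TableModeStrategy hunk: an empty group simply contributes no row rather than a null.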
diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java index 2846c25d1cc4..893a64dd465d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/Screen.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This dispatches key presses and timers to the current {@link ScreenView}. */ @@ -56,10 +55,9 @@ public class Screen implements Closeable { private Long timerTimestamp; public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, - @Nullable List initialFields, @Nullable Field initialSortField, - @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, - long numberOfIterations, boolean batchMode) - throws IOException { + @Nullable List initialFields, @Nullable Field initialSortField, + @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, + long numberOfIterations, boolean batchMode) throws IOException { connection = ConnectionFactory.createConnection(conf); admin = connection.getAdmin(); @@ -69,9 +67,8 @@ public Screen(Configuration conf, long initialRefreshDelay, Mode initialMode, } else { terminal = new TerminalImpl("hbtop"); } - currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, - initialMode, initialFields, initialSortField, initialAscendingSort, initialFilters, - numberOfIterations); + currentScreenView = new TopScreenView(this, terminal, initialRefreshDelay, admin, initialMode, + initialFields, initialSortField, initialAscendingSort, initialFilters, numberOfIterations); } @Override @@ -106,7 +103,7 @@ public void run() { nextScreenView = currentScreenView.handleTimer(); } else { TimeUnit.MILLISECONDS - .sleep(Math.min(timerTimestamp - now, SLEEP_TIMEOUT_MILLISECONDS)); + .sleep(Math.min(timerTimestamp - now, SLEEP_TIMEOUT_MILLISECONDS)); continue; } } else { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java index f061bff831d4..9291cedb7db4 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/ScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,13 +21,16 @@ import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.yetus.audience.InterfaceAudience; - /** * An interface for a screen view that handles key presses and timers. 
*/ @InterfaceAudience.Private public interface ScreenView { void init(); - @Nullable ScreenView handleKeyPress(KeyPress keyPress); - @Nullable ScreenView handleTimer(); + + @Nullable + ScreenView handleKeyPress(KeyPress keyPress); + + @Nullable + ScreenView handleTimer(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java index 45f5fd01efb7..f9f386ba1723 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,12 +21,10 @@ import java.util.EnumMap; import java.util.List; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the field screen. */ @@ -52,8 +50,8 @@ public interface ResultListener { private boolean moveMode; public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, List fields, - EnumMap fieldDisplayMap, ResultListener resultListener, - ScreenView nextScreenView) { + EnumMap fieldDisplayMap, ResultListener resultListener, + ScreenView nextScreenView) { this.fieldScreenView = Objects.requireNonNull(fieldScreenView); this.sortField = Objects.requireNonNull(sortField); this.fields = new ArrayList<>(Objects.requireNonNull(fields)); @@ -63,7 +61,7 @@ public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, Li int headerLength = 0; int descriptionLength = 0; - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); if (field == sortField) { @@ -86,8 +84,8 @@ public FieldScreenPresenter(FieldScreenView fieldScreenView, Field sortField, Li public void init() { fieldScreenView.hideCursor(); fieldScreenView.clearTerminal(); - fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, - currentPosition, headerMaxLength, descriptionMaxLength, moveMode); + fieldScreenView.showFieldScreen(sortField.getHeader(), fields, fieldDisplayMap, currentPosition, + headerMaxLength, descriptionMaxLength, moveMode); fieldScreenView.refreshTerminal(); } @@ -132,7 +130,7 @@ public void pageUp() { } public void pageDown() { - if (currentPosition < fields.size() - 1 && !moveMode) { + if (currentPosition < fields.size() - 1 && !moveMode) { int previousPosition = currentPosition; currentPosition = fields.size() - 1; showField(previousPosition); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java index 165850142247..e13431550f4a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/field/FieldScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can change the displayed fields, the sort key and the order of the fields. */ @@ -41,11 +40,11 @@ public class FieldScreenView extends AbstractScreenView { private final FieldScreenPresenter fieldScreenPresenter; public FieldScreenView(Screen screen, Terminal terminal, Field sortField, List fields, - EnumMap fieldDisplayMap, FieldScreenPresenter.ResultListener resultListener, - ScreenView nextScreenView) { + EnumMap fieldDisplayMap, FieldScreenPresenter.ResultListener resultListener, + ScreenView nextScreenView) { super(screen, terminal); this.fieldScreenPresenter = new FieldScreenPresenter(this, sortField, fields, fieldDisplayMap, - resultListener, nextScreenView); + resultListener, nextScreenView); } @Override @@ -118,11 +117,11 @@ public ScreenView handleKeyPress(KeyPress keyPress) { } public void showFieldScreen(String sortFieldHeader, List fields, - EnumMap fieldDisplayMap, int currentPosition, int headerMaxLength, - int descriptionMaxLength, boolean moveMode) { + EnumMap fieldDisplayMap, int currentPosition, int headerMaxLength, + int descriptionMaxLength, boolean moveMode) { showScreenDescription(sortFieldHeader); - for (int i = 0; i < fields.size(); i ++) { + for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); showField(i, field, fieldDisplayMap.get(field), i == currentPosition, headerMaxLength, descriptionMaxLength, moveMode); @@ -134,17 +133,17 @@ public void showScreenDescription(String sortFieldHeader) { printer.startBold().print("Fields Management").stopBold().endOfLine(); printer.print("Current Sort Field: ").startBold().print(sortFieldHeader).stopBold().endOfLine(); printer.print("Navigate with up/down, Right selects for move then or Left commits,") - .endOfLine(); + .endOfLine(); printer.print("'d' or toggles display, 's' sets sort. 
Use 'q' or to end!") - .endOfLine(); + .endOfLine(); } public void showField(int pos, Field field, boolean display, boolean selected, - int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) { + int fieldHeaderMaxLength, int fieldDescriptionMaxLength, boolean moveMode) { String fieldHeader = String.format("%-" + fieldHeaderMaxLength + "s", field.getHeader()); - String fieldDescription = String.format("%-" + fieldDescriptionMaxLength + "s", - field.getDescription()); + String fieldDescription = + String.format("%-" + fieldDescriptionMaxLength + "s", field.getDescription()); int row = FIELD_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); @@ -157,8 +156,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, printer.startBold(); } - printer.startHighlight() - .printFormat("%s = %s", fieldHeader, fieldDescription).stopHighlight(); + printer.startHighlight().printFormat("%s = %s", fieldHeader, fieldDescription) + .stopHighlight(); if (display) { printer.stopBold(); @@ -172,8 +171,8 @@ public void showField(int pos, Field field, boolean display, boolean selected, printer.startBold(); } - printer.startHighlight().print(fieldHeader).stopHighlight() - .printFormat(" = %s", fieldDescription); + printer.startHighlight().print(fieldHeader).stopHighlight().printFormat(" = %s", + fieldDescription); if (display) { printer.stopBold(); @@ -184,7 +183,7 @@ public void showField(int pos, Field field, boolean display, boolean selected, } else { if (display) { printer.print("* ").startBold().printFormat("%s = %s", fieldHeader, fieldDescription) - .stopBold().endOfLine(); + .stopBold().endOfLine(); } else { printer.printFormat(" %s = %s", fieldHeader, fieldDescription).endOfLine(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java index 5002ab8f6c18..218de676d4ec 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/CommandDescription.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +21,8 @@ import java.util.Collections; import java.util.List; import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a description of a command that we can execute in the top screen. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java index f170fc57fde1..bf72e58e7830 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,42 +19,39 @@ import java.util.Arrays; import java.util.Objects; - import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the help screen. 
*/ @InterfaceAudience.Private public class HelpScreenPresenter { - private static final CommandDescription[] COMMAND_DESCRIPTIONS = new CommandDescription[] { - new CommandDescription("f", "Add/Remove/Order/Sort the fields"), - new CommandDescription("R", "Toggle the sort order (ascending/descending)"), - new CommandDescription("m", "Select mode"), - new CommandDescription("o", "Add a filter with ignoring case"), - new CommandDescription("O", "Add a filter with case sensitive"), - new CommandDescription("^o", "Show the current filters"), - new CommandDescription("=", "Clear the current filters"), - new CommandDescription("i", "Drill down"), - new CommandDescription( - Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), - "Scroll the metrics"), - new CommandDescription("d", "Change the refresh delay"), - new CommandDescription("X", "Adjust the field length"), - new CommandDescription("", "Refresh the display"), - new CommandDescription("h", "Display this screen"), - new CommandDescription(Arrays.asList("q", ""), "Quit") - }; + private static final CommandDescription[] COMMAND_DESCRIPTIONS = + new CommandDescription[] { new CommandDescription("f", "Add/Remove/Order/Sort the fields"), + new CommandDescription("R", "Toggle the sort order (ascending/descending)"), + new CommandDescription("m", "Select mode"), + new CommandDescription("o", "Add a filter with ignoring case"), + new CommandDescription("O", "Add a filter with case sensitive"), + new CommandDescription("^o", "Show the current filters"), + new CommandDescription("=", "Clear the current filters"), + new CommandDescription("i", "Drill down"), + new CommandDescription( + Arrays.asList("up", "down", "left", "right", "pageUp", "pageDown", "home", "end"), + "Scroll the metrics"), + new CommandDescription("d", "Change the refresh delay"), + new CommandDescription("X", "Adjust the field length"), + new CommandDescription("", "Refresh the display"), + new CommandDescription("h", "Display this screen"), + new CommandDescription(Arrays.asList("q", ""), "Quit") }; private final HelpScreenView helpScreenView; private final long refreshDelay; private final ScreenView nextScreenView; public HelpScreenPresenter(HelpScreenView helpScreenView, long refreshDelay, - ScreenView nextScreenView) { + ScreenView nextScreenView) { this.helpScreenView = Objects.requireNonNull(helpScreenView); this.refreshDelay = refreshDelay; this.nextScreenView = Objects.requireNonNull(nextScreenView); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java index ccdc15737d17..e8c6a9c7efed 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/help/HelpScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The help screen. 
*/ @@ -38,7 +37,7 @@ public class HelpScreenView extends AbstractScreenView { private final HelpScreenPresenter helpScreenPresenter; public HelpScreenView(Screen screen, Terminal terminal, long refreshDelay, - ScreenView nextScreenView) { + ScreenView nextScreenView) { super(screen, terminal); this.helpScreenPresenter = new HelpScreenPresenter(this, refreshDelay, nextScreenView); } @@ -68,12 +67,12 @@ public void showHelpScreen(long refreshDelay, CommandDescription[] commandDescri private void showScreenDescription(long refreshDelay) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Help for Interactive Commands").stopBold().endOfLine(); - printer.print("Refresh delay: ").startBold() - .print((double) refreshDelay / 1000).stopBold().endOfLine(); + printer.print("Refresh delay: ").startBold().print((double) refreshDelay / 1000).stopBold() + .endOfLine(); } private void showCommandDescription(TerminalPrinter terminalPrinter, - CommandDescription commandDescription) { + CommandDescription commandDescription) { terminalPrinter.print(" "); boolean first = true; for (String key : commandDescription.getKeys()) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java index 8cd9879b0ede..cca1a996c879 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the mode screen. */ @@ -44,7 +43,7 @@ public class ModeScreenPresenter { private int currentPosition; public ModeScreenPresenter(ModeScreenView modeScreenView, Mode currentMode, - Consumer resultListener, ScreenView nextScreenView) { + Consumer resultListener, ScreenView nextScreenView) { this.modeScreenView = Objects.requireNonNull(modeScreenView); this.currentMode = Objects.requireNonNull(currentMode); this.resultListener = Objects.requireNonNull(resultListener); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java index bda9853028b7..d5af995e9dd0 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/mode/ModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; import org.apache.yetus.audience.InterfaceAudience; - /** * The screen where we can choose the {@link Mode} in the top screen. 
*/ @@ -41,10 +40,10 @@ public class ModeScreenView extends AbstractScreenView { private final ModeScreenPresenter modeScreenPresenter; public ModeScreenView(Screen screen, Terminal terminal, Mode currentMode, - Consumer resultListener, ScreenView nextScreenView) { + Consumer resultListener, ScreenView nextScreenView) { super(screen, terminal); - this.modeScreenPresenter = new ModeScreenPresenter(this, currentMode, resultListener, - nextScreenView); + this.modeScreenPresenter = + new ModeScreenPresenter(this, currentMode, resultListener, nextScreenView); } @Override @@ -102,35 +101,35 @@ public ScreenView handleKeyPress(KeyPress keyPress) { } public void showModeScreen(Mode currentMode, List modes, int currentPosition, - int modeHeaderMaxLength, int modeDescriptionMaxLength) { + int modeHeaderMaxLength, int modeDescriptionMaxLength) { showScreenDescription(currentMode); for (int i = 0; i < modes.size(); i++) { - showMode(i, modes.get(i), i == currentPosition, - modeHeaderMaxLength, modeDescriptionMaxLength); + showMode(i, modes.get(i), i == currentPosition, modeHeaderMaxLength, + modeDescriptionMaxLength); } } private void showScreenDescription(Mode currentMode) { TerminalPrinter printer = getTerminalPrinter(SCREEN_DESCRIPTION_START_ROW); printer.startBold().print("Mode Management").stopBold().endOfLine(); - printer.print("Current mode: ") - .startBold().print(currentMode.getHeader()).stopBold().endOfLine(); + printer.print("Current mode: ").startBold().print(currentMode.getHeader()).stopBold() + .endOfLine(); printer.print("Select mode followed by ").endOfLine(); } public void showMode(int pos, Mode mode, boolean selected, int modeHeaderMaxLength, - int modeDescriptionMaxLength) { + int modeDescriptionMaxLength) { String modeHeader = String.format("%-" + modeHeaderMaxLength + "s", mode.getHeader()); - String modeDescription = String.format("%-" + modeDescriptionMaxLength + "s", - mode.getDescription()); + String modeDescription = + String.format("%-" + modeDescriptionMaxLength + "s", mode.getDescription()); int row = MODE_START_ROW + pos; TerminalPrinter printer = getTerminalPrinter(row); if (selected) { printer.startHighlight().print(modeHeader).stopHighlight() - .printFormat(" = %s", modeDescription).endOfLine(); + .printFormat(" = %s", modeDescription).endOfLine(); } else { printer.printFormat("%s = %s", modeHeader, modeDescription).endOfLine(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java index 6c6bf1c1b215..3fe4da280542 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the filter display mode. 
*/ @@ -37,7 +36,7 @@ public class FilterDisplayModeScreenPresenter { private final ScreenView nextScreenView; public FilterDisplayModeScreenPresenter(FilterDisplayModeScreenView filterDisplayModeScreenView, - List filters, ScreenView nextScreenView) { + List filters, ScreenView nextScreenView) { this.filterDisplayModeScreenView = Objects.requireNonNull(filterDisplayModeScreenView); this.filters = Collections.unmodifiableList(new ArrayList<>(Objects.requireNonNull(filters))); this.nextScreenView = Objects.requireNonNull(nextScreenView); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java index e85a4b7df42c..9bf16a73c273 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/FilterDisplayModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,8 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** - * The filter display mode in the top screen. - * - * Exit if Enter key is pressed. + * The filter display mode in the top screen. Exit if Enter key is pressed. */ @InterfaceAudience.Private public class FilterDisplayModeScreenView extends AbstractScreenView { @@ -40,11 +37,11 @@ public class FilterDisplayModeScreenView extends AbstractScreenView { private final FilterDisplayModeScreenPresenter filterDisplayModeScreenPresenter; public FilterDisplayModeScreenView(Screen screen, Terminal terminal, int row, - List filters, ScreenView nextScreenView) { + List filters, ScreenView nextScreenView) { super(screen, terminal); this.row = row; this.filterDisplayModeScreenPresenter = - new FilterDisplayModeScreenPresenter(this, filters, nextScreenView); + new FilterDisplayModeScreenPresenter(this, filters, nextScreenView); } @Override @@ -68,6 +65,6 @@ public void showFilters(List filters) { } getTerminalPrinter(row).startBold().print(" to resume, filters: " + filtersString) - .stopBold().endOfLine(); + .stopBold().endOfLine(); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java index df672e9695d9..98a059faacc7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Header.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.hbtop.field.Field; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents headers for the metrics in the top screen. */ @@ -36,7 +35,7 @@ public Header(Field field, int length) { } public String format() { - return "%" + (field.isLeftJustify() ? "-" : "") + length + "s"; + return "%" + (field.isLeftJustify() ? 
"-" : "") + length + "s"; } public Field getField() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java index 8ab858b995f3..5551f3bc1701 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the input mode. */ @@ -42,7 +41,7 @@ public class InputModeScreenPresenter { private int historyPosition = -1; public InputModeScreenPresenter(InputModeScreenView inputModeScreenView, String message, - @Nullable List histories, Function resultListener) { + @Nullable List histories, Function resultListener) { this.inputModeScreenView = Objects.requireNonNull(inputModeScreenView); this.message = Objects.requireNonNull(message); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java index ab64a8ade227..311d86611ec7 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/InputModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The input mode in the top screen. 
*/ @@ -37,11 +36,11 @@ public class InputModeScreenView extends AbstractScreenView { private final InputModeScreenPresenter inputModeScreenPresenter; public InputModeScreenView(Screen screen, Terminal terminal, int row, String message, - List histories, Function resultListener) { + List histories, Function resultListener) { super(screen, terminal); this.row = row; - this.inputModeScreenPresenter = new InputModeScreenPresenter(this, message, histories, - resultListener); + this.inputModeScreenPresenter = + new InputModeScreenPresenter(this, message, histories, resultListener); } @Override @@ -100,7 +99,7 @@ public ScreenView handleKeyPress(KeyPress keyPress) { public void showInput(String message, String inputString, int cursorPosition) { getTerminalPrinter(row).startBold().print(message).stopBold().print(" ").print(inputString) - .endOfLine(); + .endOfLine(); setCursorPosition(message.length() + 1 + cursorPosition, row); refreshTerminal(); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java index 174a15a48432..ec83634566ee 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,11 +21,8 @@ import org.apache.hadoop.hbase.hbtop.screen.ScreenView; import org.apache.yetus.audience.InterfaceAudience; - /** - * The presentation logic for the message mode. - * - * Exit after 2 seconds or if any key is pressed. + * The presentation logic for the message mode. Exit after 2 seconds or if any key is pressed. */ @InterfaceAudience.Private public class MessageModeScreenPresenter { @@ -35,7 +32,7 @@ public class MessageModeScreenPresenter { private final ScreenView nextScreenView; public MessageModeScreenPresenter(MessageModeScreenView messageModeScreenView, String message, - ScreenView nextScreenView) { + ScreenView nextScreenView) { this.messageModeScreenView = Objects.requireNonNull(messageModeScreenView); this.message = Objects.requireNonNull(message); this.nextScreenView = Objects.requireNonNull(nextScreenView); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java index 0dfa388fad0c..9da5d27bbb97 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/MessageModeScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.yetus.audience.InterfaceAudience; - /** * The message mode in the top screen. 
*/ @@ -35,11 +34,10 @@ public class MessageModeScreenView extends AbstractScreenView { private final MessageModeScreenPresenter messageModeScreenPresenter; public MessageModeScreenView(Screen screen, Terminal terminal, int row, String message, - ScreenView nextScreenView) { + ScreenView nextScreenView) { super(screen, terminal); this.row = row; - this.messageModeScreenPresenter = - new MessageModeScreenPresenter(this, message, nextScreenView); + this.messageModeScreenPresenter = new MessageModeScreenPresenter(this, message, nextScreenView); } @Override @@ -61,6 +59,6 @@ public ScreenView handleKeyPress(KeyPress keyPress) { public void showMessage(String message) { getTerminalPrinter(row).startHighlight().print(" ").print(message).print(" ").stopHighlight() - .endOfLine(); + .endOfLine(); } } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java index b95e6f480e6e..4f93dda8ec5f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Paging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for paging for the metrics. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java index 03598f66fb48..66d64a7cc66a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/Summary.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the summary of the metrics. */ @@ -37,9 +36,9 @@ public class Summary { private final double averageLoad; private final long aggregateRequestPerSecond; - public Summary(String currentTime, String version, String clusterId, int servers, - int liveServers, int deadServers, int regionCount, int ritCount, double averageLoad, - long aggregateRequestPerSecond) { + public Summary(String currentTime, String version, String clusterId, int servers, int liveServers, + int deadServers, int regionCount, int ritCount, double averageLoad, + long aggregateRequestPerSecond) { this.currentTime = Objects.requireNonNull(currentTime); this.version = Objects.requireNonNull(version); this.clusterId = Objects.requireNonNull(clusterId); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java index 9cbcd18e885f..b312addc6b59 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * The data and business logic for the top screen. */ @@ -66,33 +65,32 @@ public class TopScreenModel { private boolean ascendingSort; public TopScreenModel(Admin admin, Mode initialMode, @Nullable List initialFields, - @Nullable Field initialSortField, @Nullable Boolean initialAscendingSort, - @Nullable List initialFilters) { + @Nullable Field initialSortField, @Nullable Boolean initialAscendingSort, + @Nullable List initialFilters) { this.admin = Objects.requireNonNull(admin); switchMode(Objects.requireNonNull(initialMode), initialSortField, false, initialFields, initialAscendingSort, initialFilters); } public void switchMode(Mode nextMode, boolean keepSortFieldAndSortOrderIfPossible, - List initialFilters) { + List initialFilters) { switchMode(nextMode, null, keepSortFieldAndSortOrderIfPossible, null, null, initialFilters); } public void switchMode(Mode nextMode, Field initialSortField, - boolean keepSortFieldAndSortOrderIfPossible, @Nullable List initialFields, - @Nullable Boolean initialAscendingSort, @Nullable List initialFilters) { + boolean keepSortFieldAndSortOrderIfPossible, @Nullable List initialFields, + @Nullable Boolean initialAscendingSort, @Nullable List initialFilters) { currentMode = nextMode; fieldInfos = Collections.unmodifiableList(new ArrayList<>(currentMode.getFieldInfos())); if (initialFields != null) { List tmp = new ArrayList<>(initialFields); tmp.addAll(currentMode.getFieldInfos().stream().map(FieldInfo::getField) - .filter(f -> !initialFields.contains(f)) - .collect(Collectors.toList())); + .filter(f -> !initialFields.contains(f)).collect(Collectors.toList())); fields = Collections.unmodifiableList(tmp); } else { - fields = Collections.unmodifiableList(currentMode.getFieldInfos().stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + fields = Collections.unmodifiableList( + currentMode.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList())); } if (keepSortFieldAndSortOrderIfPossible) { @@ -146,8 +144,7 @@ public void refreshMetricsData() { } private void refreshSummary(ClusterMetrics clusterMetrics) { - String currentTime = ISO_8601_EXTENDED_TIME_FORMAT - .format(EnvironmentEdgeManager.currentTime()); + String currentTime = ISO_8601_EXTENDED_TIME_FORMAT.format(EnvironmentEdgeManager.currentTime()); String version = clusterMetrics.getHBaseVersion(); String clusterId = clusterMetrics.getClusterId(); int liveServers = clusterMetrics.getLiveServerMetrics().size(); @@ -156,23 +153,22 @@ private void refreshSummary(ClusterMetrics clusterMetrics) { int ritCount = clusterMetrics.getRegionStatesInTransition().size(); double averageLoad = clusterMetrics.getAverageLoad(); long aggregateRequestPerSecond = clusterMetrics.getLiveServerMetrics().entrySet().stream() - .mapToLong(e -> e.getValue().getRequestCountPerSecond()).sum(); + .mapToLong(e -> e.getValue().getRequestCountPerSecond()).sum(); - summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, - liveServers, deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); + summary = new Summary(currentTime, version, clusterId, liveServers + deadServers, liveServers, + deadServers, regionCount, ritCount, averageLoad, aggregateRequestPerSecond); } private void refreshRecords(ClusterMetrics clusterMetrics) { List records = currentMode.getRecords(clusterMetrics, pushDownFilters); // Filter and 
sort - records = records.stream() - .filter(r -> filters.stream().allMatch(f -> f.execute(r))) - .sorted((recordLeft, recordRight) -> { - FieldValue left = recordLeft.get(currentSortField); - FieldValue right = recordRight.get(currentSortField); - return (ascendingSort ? 1 : -1) * left.compareTo(right); - }).collect(Collectors.toList()); + records = records.stream().filter(r -> filters.stream().allMatch(f -> f.execute(r))) + .sorted((recordLeft, recordRight) -> { + FieldValue left = recordLeft.get(currentSortField); + FieldValue right = recordRight.get(currentSortField); + return (ascendingSort ? 1 : -1) * left.compareTo(right); + }).collect(Collectors.toList()); this.records = Collections.unmodifiableList(records); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java index e4e3caee5940..ffc02142ed03 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; - /** * The presentation logic for the top screen. */ @@ -63,7 +62,7 @@ public class TopScreenPresenter { private long iterations; public TopScreenPresenter(TopScreenView topScreenView, long initialRefreshDelay, - TopScreenModel topScreenModel, @Nullable List initialFields, long numberOfIterations) { + TopScreenModel topScreenModel, @Nullable List initialFields, long numberOfIterations) { this.topScreenView = Objects.requireNonNull(topScreenView); this.refreshDelay = new AtomicLong(initialRefreshDelay); this.topScreenModel = Objects.requireNonNull(topScreenModel); @@ -78,7 +77,7 @@ public void init() { } private void updateTerminalLengthAndPageSize(@Nullable TerminalSize terminalSize, - @Nullable Integer pageSize) { + @Nullable Integer pageSize) { if (terminalSize != null) { terminalLength = terminalSize.getColumns(); } else { @@ -132,8 +131,7 @@ private void adjustFieldLengthIfNeeded() { for (Field f : topScreenModel.getFields()) { if (f.isAutoAdjust()) { int maxLength = topScreenModel.getRecords().stream() - .map(r -> r.get(f).asString().length()) - .max(Integer::compareTo).orElse(0); + .map(r -> r.get(f).asString().length()).max(Integer::compareTo).orElse(0); fieldLengthMap.put(f, Math.max(maxLength, f.getHeader().length())); } } @@ -141,9 +139,8 @@ private void adjustFieldLengthIfNeeded() { } private List
<Header>
    getDisplayedHeaders() { - List displayFields = - topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).collect(Collectors.toList()); + List displayFields = topScreenModel.getFields().stream().filter(fieldDisplayMap::get) + .collect(Collectors.toList()); if (displayFields.isEmpty()) { horizontalScroll = 0; @@ -231,8 +228,7 @@ public void end() { } private int getHeaderSize() { - return (int) topScreenModel.getFields().stream() - .filter(fieldDisplayMap::get).count(); + return (int) topScreenModel.getFields().stream().filter(fieldDisplayMap::get).count(); } public void switchSortOrder() { @@ -246,18 +242,16 @@ public ScreenView transitionToHelpScreen(Screen screen, Terminal terminal) { public ScreenView transitionToModeScreen(Screen screen, Terminal terminal) { return new ModeScreenView(screen, terminal, topScreenModel.getCurrentMode(), this::switchMode, - topScreenView); + topScreenView); } public ScreenView transitionToFieldScreen(Screen screen, Terminal terminal) { - return new FieldScreenView(screen, terminal, - topScreenModel.getCurrentSortField(), topScreenModel.getFields(), - fieldDisplayMap, - (sortField, fields, fieldDisplayMap) -> { - topScreenModel.setSortFieldAndFields(sortField, fields); - this.fieldDisplayMap.clear(); - this.fieldDisplayMap.putAll(fieldDisplayMap); - }, topScreenView); + return new FieldScreenView(screen, terminal, topScreenModel.getCurrentSortField(), + topScreenModel.getFields(), fieldDisplayMap, (sortField, fields, fieldDisplayMap) -> { + topScreenModel.setSortFieldAndFields(sortField, fields); + this.fieldDisplayMap.clear(); + this.fieldDisplayMap.putAll(fieldDisplayMap); + }, topScreenView); } private void switchMode(Mode nextMode) { @@ -303,42 +297,41 @@ public ScreenView goToMessageMode(Screen screen, Terminal terminal, int row, Str public ScreenView goToInputModeForRefreshDelay(Screen screen, Terminal terminal, int row) { return new InputModeScreenView(screen, terminal, row, - "Change refresh delay from " + (double) refreshDelay.get() / 1000 + " to", null, - (inputString) -> { - if (inputString.isEmpty()) { + "Change refresh delay from " + (double) refreshDelay.get() / 1000 + " to", null, + (inputString) -> { + if (inputString.isEmpty()) { + return topScreenView; + } + + double delay; + try { + delay = Double.parseDouble(inputString); + } catch (NumberFormatException e) { + return goToMessageMode(screen, terminal, row, "Unacceptable floating point"); + } + + refreshDelay.set((long) (delay * 1000)); return topScreenView; - } - - double delay; - try { - delay = Double.parseDouble(inputString); - } catch (NumberFormatException e) { - return goToMessageMode(screen, terminal, row, "Unacceptable floating point"); - } - - refreshDelay.set((long) (delay * 1000)); - return topScreenView; - }); + }); } public ScreenView goToInputModeForFilter(Screen screen, Terminal terminal, int row, - boolean ignoreCase) { + boolean ignoreCase) { return new InputModeScreenView(screen, terminal, row, - "add filter #" + (topScreenModel.getFilters().size() + 1) + - " (" + (ignoreCase ? "ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", - topScreenModel.getFilterHistories(), - (inputString) -> { - if (inputString.isEmpty()) { + "add filter #" + (topScreenModel.getFilters().size() + 1) + " (" + + (ignoreCase ? 
"ignoring case" : "case sensitive") + ") as: [!]FLD?VAL", + topScreenModel.getFilterHistories(), (inputString) -> { + if (inputString.isEmpty()) { + return topScreenView; + } + + if (!topScreenModel.addFilter(inputString, ignoreCase)) { + return goToMessageMode(screen, terminal, row, "Unacceptable filter expression"); + } + + paging.init(); return topScreenView; - } - - if (!topScreenModel.addFilter(inputString, ignoreCase)) { - return goToMessageMode(screen, terminal, row, "Unacceptable filter expression"); - } - - paging.init(); - return topScreenView; - }); + }); } public void clearFilters() { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java index da5c88360d19..123c7217f28e 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/screen/top/TopScreenView.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,12 +35,10 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; import org.apache.yetus.audience.InterfaceAudience; - /** - * The screen that provides a dynamic real-time view for the HBase metrics. - * - * This shows the metric {@link Summary} and the metric {@link Record}s. The summary and the - * metrics are updated periodically (3 seconds by default). + * The screen that provides a dynamic real-time view for the HBase metrics. This shows the metric + * {@link Summary} and the metric {@link Record}s. The summary and the metrics are updated + * periodically (3 seconds by default). */ @InterfaceAudience.Private public class TopScreenView extends AbstractScreenView { @@ -55,13 +53,14 @@ public class TopScreenView extends AbstractScreenView { private Integer pageSize; public TopScreenView(Screen screen, Terminal terminal, long initialRefreshDelay, Admin admin, - Mode initialMode, @Nullable List initialFields, @Nullable Field initialSortField, - @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, - long numberOfIterations) { + Mode initialMode, @Nullable List initialFields, @Nullable Field initialSortField, + @Nullable Boolean initialAscendingSort, @Nullable List initialFilters, + long numberOfIterations) { super(screen, terminal); - this.topScreenPresenter = new TopScreenPresenter(this, initialRefreshDelay, - new TopScreenModel(admin, initialMode, initialFields, initialSortField, - initialAscendingSort, initialFilters), initialFields, numberOfIterations); + this.topScreenPresenter = new TopScreenPresenter( + this, initialRefreshDelay, new TopScreenModel(admin, initialMode, initialFields, + initialSortField, initialAscendingSort, initialFilters), + initialFields, numberOfIterations); } @Override @@ -223,7 +222,7 @@ public Integer getPageSize() { } public void showTopScreen(Summary summary, List
    headers, List records, - Record selectedRecord) { + Record selectedRecord) { showSummary(summary); clearMessage(); showHeaders(headers); @@ -235,23 +234,17 @@ private void showSummary(Summary summary) { printer.print(String.format("HBase hbtop - %s", summary.getCurrentTime())).endOfLine(); printer.print(String.format("Version: %s", summary.getVersion())).endOfLine(); printer.print(String.format("Cluster ID: %s", summary.getClusterId())).endOfLine(); - printer.print("RegionServer(s): ") - .startBold().print(Integer.toString(summary.getServers())).stopBold() - .print(" total, ") - .startBold().print(Integer.toString(summary.getLiveServers())).stopBold() - .print(" live, ") - .startBold().print(Integer.toString(summary.getDeadServers())).stopBold() - .print(" dead").endOfLine(); - printer.print("RegionCount: ") - .startBold().print(Integer.toString(summary.getRegionCount())).stopBold() - .print(" total, ") - .startBold().print(Integer.toString(summary.getRitCount())).stopBold() - .print(" rit").endOfLine(); - printer.print("Average Cluster Load: ") - .startBold().print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine(); - printer.print("Aggregate Request/s: ") - .startBold().print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold() - .endOfLine(); + printer.print("RegionServer(s): ").startBold().print(Integer.toString(summary.getServers())) + .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getLiveServers())) + .stopBold().print(" live, ").startBold().print(Integer.toString(summary.getDeadServers())) + .stopBold().print(" dead").endOfLine(); + printer.print("RegionCount: ").startBold().print(Integer.toString(summary.getRegionCount())) + .stopBold().print(" total, ").startBold().print(Integer.toString(summary.getRitCount())) + .stopBold().print(" rit").endOfLine(); + printer.print("Average Cluster Load: ").startBold() + .print(String.format("%.2f", summary.getAverageLoad())).stopBold().endOfLine(); + printer.print("Aggregate Request/s: ").startBold() + .print(Long.toString(summary.getAggregateRequestPerSecond())).stopBold().endOfLine(); } private void showRecords(List
<Header> headers, List<Record> records, Record selectedRecord) { @@ -264,7 +257,7 @@ private void showRecords(List
    headers, List records, Record sele } List buf = new ArrayList<>(headers.size()); for (int i = 0; i < size; i++) { - if(i < records.size()) { + if (i < records.size()) { Record record = records.get(i); buf.clear(); for (Header header : headers) { @@ -293,16 +286,15 @@ private void showRecords(List
<Header> headers, List<Record> records, Record sele } private void showHeaders(List
    headers) { - String header = headers.stream() - .map(h -> String.format(h.format(), h.getField().getHeader())) - .collect(Collectors.joining(" ")); + String header = headers.stream().map(h -> String.format(h.format(), h.getField().getHeader())) + .collect(Collectors.joining(" ")); if (!header.isEmpty()) { header += " "; } getTerminalPrinter(RECORD_HEADER_ROW).startHighlight().print(header).stopHighlight() - .endOfLine(); + .endOfLine(); } private String limitLineLength(String line, int length) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java index 9322aaa8157f..a0fba228e72f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Attributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * The attributes of text in the terminal. */ @@ -117,8 +116,8 @@ public boolean equals(Object o) { } Attributes that = (Attributes) o; return bold == that.bold && blink == that.blink && reverse == that.reverse - && underline == that.underline && foregroundColor == that.foregroundColor - && backgroundColor == that.backgroundColor; + && underline == that.underline && foregroundColor == that.foregroundColor + && backgroundColor == that.backgroundColor; } @Override diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java index 843a315ab716..e0a1643a34f3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Color.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal color definitions. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java index 775ff3d72e6a..11da1b58c6e3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/CursorPosition.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * A 2-d position in 'terminal space'. 
*/ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java index d0be00c5868d..53c4da17e96a 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/KeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,42 +21,15 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents the user pressing a key on the keyboard. */ @InterfaceAudience.Private public class KeyPress { public enum Type { - Character, - Escape, - Backspace, - ArrowLeft, - ArrowRight, - ArrowUp, - ArrowDown, - Insert, - Delete, - Home, - End, - PageUp, - PageDown, - ReverseTab, - Tab, - Enter, - F1, - F2, - F3, - F4, - F5, - F6, - F7, - F8, - F9, - F10, - F11, - F12, - Unknown + Character, Escape, Backspace, ArrowLeft, ArrowRight, ArrowUp, ArrowDown, Insert, Delete, Home, + End, PageUp, PageDown, ReverseTab, Tab, Enter, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, + F12, Unknown } private final Type type; @@ -66,7 +39,7 @@ public enum Type { private final boolean shift; public KeyPress(Type type, @Nullable Character character, boolean alt, boolean ctrl, - boolean shift) { + boolean shift) { this.type = Objects.requireNonNull(type); this.character = character; this.alt = alt; @@ -97,13 +70,8 @@ public boolean isShift() { @Override public String toString() { - return "KeyPress{" + - "type=" + type + - ", character=" + escape(character) + - ", alt=" + alt + - ", ctrl=" + ctrl + - ", shift=" + shift + - '}'; + return "KeyPress{" + "type=" + type + ", character=" + escape(character) + ", alt=" + alt + + ", ctrl=" + ctrl + ", shift=" + shift + '}'; } private String escape(Character character) { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java index c834b7515c24..f34cfc298c62 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/Terminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,19 +21,29 @@ import java.io.Closeable; import org.apache.yetus.audience.InterfaceAudience; - /** * The terminal interface that is an abstraction of terminal screen. 
*/ @InterfaceAudience.Private public interface Terminal extends Closeable { void clear(); + void refresh(); - @Nullable TerminalSize getSize(); - @Nullable TerminalSize doResizeIfNecessary(); - @Nullable KeyPress pollKeyPress(); + + @Nullable + TerminalSize getSize(); + + @Nullable + TerminalSize doResizeIfNecessary(); + + @Nullable + KeyPress pollKeyPress(); + CursorPosition getCursorPosition(); + void setCursorPosition(int column, int row); + void hideCursor(); + TerminalPrinter getTerminalPrinter(int startRow); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java index 66fb55875b0e..52818e42a7d3 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import org.apache.yetus.audience.InterfaceAudience; - /** * The interface responsible for printing to the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java index f7e55dde7b54..7aea3dac115b 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/TerminalSize.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.Objects; import org.apache.yetus.audience.InterfaceAudience; - /** * Terminal dimensions in 2-d space, measured in number of rows and columns. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java index de61477ce33a..6cd9475c6d0f 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/Cell.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a single text cell of the terminal. */ diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java index 52f8e374364e..4133d6cb6e4b 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/EscapeSequences.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.Color; import org.apache.yetus.audience.InterfaceAudience; - /** * Utility class for escape sequences. */ @@ -54,7 +53,7 @@ public static String clearRemainingLine() { } public static String color(Color foregroundColor, Color backgroundColor, boolean bold, - boolean reverse, boolean blink, boolean underline) { + boolean reverse, boolean blink, boolean underline) { int foregroundColorValue = getColorValue(foregroundColor, true); int backgroundColorValue = getColorValue(backgroundColor, false); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java index a20222c3eb5b..937dbed63d27 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/KeyPressGenerator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; - /** - * This generates {@link KeyPress} objects from the given input stream and offers them to the - * given queue. + * This generates {@link KeyPress} objects from the given input stream and offers them to the given + * queue. */ @InterfaceAudience.Private public class KeyPressGenerator { @@ -67,9 +66,9 @@ public KeyPressGenerator(InputStream inputStream, Queue keyPressQueue) input = new InputStreamReader(inputStream, StandardCharsets.UTF_8); this.keyPressQueue = keyPressQueue; - executorService = Executors.newFixedThreadPool(2, new ThreadFactoryBuilder() - .setNameFormat("KeyPressGenerator-%d").setDaemon(true) - .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); + executorService = Executors.newFixedThreadPool(2, + new ThreadFactoryBuilder().setNameFormat("KeyPressGenerator-%d").setDaemon(true) + .setUncaughtExceptionHandler(Threads.LOGGING_EXCEPTION_HANDLER).build()); initState(); } @@ -469,8 +468,8 @@ private boolean isCtrl(int param) { private void offer(KeyPress keyPress) { // Handle ctrl + c - if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character && - keyPress.getCharacter() == 'c') { + if (keyPress.isCtrl() && keyPress.getType() == KeyPress.Type.Character + && keyPress.getCharacter() == 'c') { System.exit(0); } diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java index 8752c5fe689a..b00769c0401d 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/ScreenBuffer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.hbtop.terminal.CursorPosition; import org.apache.yetus.audience.InterfaceAudience; - /** * Represents a buffer of the terminal screen for double-buffering. 
*/ @@ -78,8 +77,8 @@ public void flush(PrintWriter output) { flushRow(row, sb, attributes); } - if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows && - cursorColumn < columns) { + if (cursorVisible && cursorRow >= 0 && cursorColumn >= 0 && cursorRow < rows + && cursorColumn < columns) { sb.append(cursor(true)); sb.append(moveCursor(cursorColumn, cursorRow)); } else { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java index c6b74afcbfa5..a4416a82fac8 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,10 +43,9 @@ import org.slf4j.LoggerFactory; /** - * An implementation of the {@link Terminal} interface for normal display mode. - * - * This implementation produces output intended for human viewing. In particular, it only displays - * one screenful of data. The output contains some escape sequences for formatting. + * An implementation of the {@link Terminal} interface for normal display mode. This implementation + * produces output intended for human viewing. In particular, it only displays one screenful of + * data. The output contains some escape sequences for formatting. */ @InterfaceAudience.Private public class TerminalImpl implements Terminal { @@ -181,8 +180,8 @@ private TerminalSize queryTerminalSize() { } private void sttyRaw() { - doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + - "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); + doStty("-ignbrk -brkint -parmrk -istrip -inlcr -igncr -icrnl -ixon -opost " + + "-echo -echonl -icanon -isig -iexten -parenb cs8 min 1"); } private void sttyCooked() { @@ -190,7 +189,7 @@ private void sttyCooked() { } private String doStty(String sttyOptionsString) { - String [] cmd = {"/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty"}; + String[] cmd = { "/bin/sh", "-c", "stty " + sttyOptionsString + " < /dev/tty" }; try { Process process = Runtime.getRuntime().exec(cmd); @@ -198,14 +197,14 @@ private String doStty(String sttyOptionsString) { String ret; // stdout - try (BufferedReader stdout = new BufferedReader(new InputStreamReader( - process.getInputStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stdout = new BufferedReader( + new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) { ret = stdout.readLine(); } // stderr - try (BufferedReader stderr = new BufferedReader(new InputStreamReader( - process.getErrorStream(), StandardCharsets.UTF_8))) { + try (BufferedReader stderr = new BufferedReader( + new InputStreamReader(process.getErrorStream(), StandardCharsets.UTF_8))) { String line = stderr.readLine(); if ((line != null) && (line.length() > 0)) { LOGGER.error("Error output from stty: " + line); diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java index 788d26799581..05e0b5611533 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java +++ 
b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TerminalPrinterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java index 60f550289e26..ba7a5de40a5c 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminal.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,11 +25,9 @@ import org.apache.hadoop.hbase.hbtop.terminal.TerminalSize; /** - * An implementation of the {@link Terminal} interface for batch mode. - * - * This implementation produces output that's more sensible for collecting to a log file or for - * parsing. There is no limit on the number of output lines, and the output doesn't contain any - * escape sequences for formatting. + * An implementation of the {@link Terminal} interface for batch mode. This implementation produces + * output that's more sensible for collecting to a log file or for parsing. There is no limit on the + * number of output lines, and the output doesn't contain any escape sequences for formatting. */ public class BatchTerminal implements Terminal { diff --git a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java index 60316669daaf..ed216a164926 100644 --- a/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java +++ b/hbase-hbtop/src/main/java/org/apache/hadoop/hbase/hbtop/terminal/impl/batch/BatchTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java index 339cc40847d3..09722639cada 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecord.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -28,20 +28,17 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecord { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecord.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRecord.class); @Test public void testBuilder() { Record actual1 = Record.builder().put(Field.TABLE, "tableName") - .put(entry(Field.REGION_COUNT, 3)) - .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)) - .build(); + .put(entry(Field.REGION_COUNT, 3)) + .put(Field.REQUEST_COUNT_PER_SECOND, Field.REQUEST_COUNT_PER_SECOND.newValue(100L)).build(); assertThat(actual1.size(), is(3)); assertThat(actual1.get(Field.TABLE).asString(), is("tableName")); @@ -58,11 +55,8 @@ public void testBuilder() { @Test public void testOfEntries() { - Record actual = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record actual = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); assertThat(actual.size(), is(3)); assertThat(actual.get(Field.TABLE).asString(), is("tableName")); @@ -72,17 +66,11 @@ public void testOfEntries() { @Test public void testCombine() { - Record record1 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 3), - entry(Field.REQUEST_COUNT_PER_SECOND, 100L) - ); + Record record1 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 3), + entry(Field.REQUEST_COUNT_PER_SECOND, 100L)); - Record record2 = Record.ofEntries( - entry(Field.TABLE, "tableName"), - entry(Field.REGION_COUNT, 5), - entry(Field.REQUEST_COUNT_PER_SECOND, 500L) - ); + Record record2 = Record.ofEntries(entry(Field.TABLE, "tableName"), entry(Field.REGION_COUNT, 5), + entry(Field.REQUEST_COUNT_PER_SECOND, 500L)); Record actual = record1.combine(record2); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java index 2807fd8ef61e..e4805336a7fc 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestRecordFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,21 +36,19 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRecordFilter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRecordFilter.class); + HBaseClassTestRule.forClass(TestRecordFilter.class); @Test public void testParseAndBuilder() { testParseAndBuilder("REGION=region1", false, RecordFilter.newBuilder(Field.REGION).equal("region1")); - testParseAndBuilder("REGION=", false, - RecordFilter.newBuilder(Field.REGION).equal("")); + testParseAndBuilder("REGION=", false, RecordFilter.newBuilder(Field.REGION).equal("")); testParseAndBuilder("!REGION=region1", false, RecordFilter.newBuilder(Field.REGION).notEqual("region1")); @@ -132,8 +130,8 @@ private void testToString(String filterString) { public void testFilters() { List records = createTestRecords(); - testFilter(records, "REGION=region", false, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=region", false, "region1", "region2", "region3", "region4", + "region5"); testFilter(records, "!REGION=region", false); testFilter(records, "REGION=Region", false); @@ -148,8 +146,7 @@ public void testFilters() { testFilter(records, "LOCALITY<0.5", false, "region5"); testFilter(records, "%COMP<=50%", false, "region2", "region3", "region4", "region5"); - testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, - "region2", "region5"); + testFilters(records, Arrays.asList("SF>=100MB", "#REQ/S>100"), false, "region2", "region5"); testFilters(records, Arrays.asList("%COMP<=50%", "!#SF>=10"), false, "region4"); testFilters(records, Arrays.asList("!REGION==region1", "LOCALITY<0.5", "#REQ/S>100"), false, "region5"); @@ -159,10 +156,10 @@ public void testFilters() { public void testFiltersIgnoreCase() { List records = createTestRecords(); - testFilter(records, "REGION=Region", true, - "region1", "region2", "region3", "region4", "region5"); - testFilter(records, "REGION=REGION", true, - "region1", "region2", "region3", "region4", "region5"); + testFilter(records, "REGION=Region", true, "region1", "region2", "region3", "region4", + "region5"); + testFilter(records, "REGION=REGION", true, "region1", "region2", "region3", "region4", + "region5"); } private List createTestRecords() { @@ -175,8 +172,8 @@ private List createTestRecords() { return ret; } - private Record createTestRecord(String region, long requestCountPerSecond, - Size storeFileSize, int numStoreFiles, float locality, float compactionProgress) { + private Record createTestRecord(String region, long requestCountPerSecond, Size storeFileSize, + int numStoreFiles, float locality, float compactionProgress) { Record.Builder builder = Record.builder(); builder.put(Field.REGION, region); builder.put(Field.REQUEST_COUNT_PER_SECOND, requestCountPerSecond); @@ -188,18 +185,16 @@ private Record createTestRecord(String region, long requestCountPerSecond, } private void testFilter(List records, String filterString, boolean ignoreCase, - String... expectedRegions) { + String... expectedRegions) { testFilters(records, Collections.singletonList(filterString), ignoreCase, expectedRegions); } private void testFilters(List records, List filterStrings, boolean ignoreCase, - String... 
expectedRegions) { - List actual = - records.stream().filter(r -> filterStrings.stream() - .map(f -> RecordFilter.parse(f, ignoreCase)) - .allMatch(f -> f.execute(r))) - .map(r -> r.get(Field.REGION).asString()) - .collect(Collectors.toList()); + String... expectedRegions) { + List actual = records.stream() + .filter(r -> filterStrings.stream().map(f -> RecordFilter.parse(f, ignoreCase)) + .allMatch(f -> f.execute(r))) + .map(r -> r.get(Field.REGION).asString()).collect(Collectors.toList()); assertThat(actual, hasItems(expectedRegions)); assertThat(actual.size(), is(expectedRegions.length)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java index c633e37825ea..7db264d38dd8 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/TestUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.master.RegionState; import org.apache.hadoop.hbase.util.Bytes; - public final class TestUtils { private TestUtils() { @@ -57,114 +56,92 @@ public static ClusterMetrics createDummyClusterMetrics() { // host1 List regionMetricsList = new ArrayList<>(); List userMetricsList = new ArrayList<>(); - userMetricsList.add(createUserMetrics("FOO",1,2, 4)); - userMetricsList.add(createUserMetrics("BAR",2,3, 3)); - regionMetricsList.add(createRegionMetrics( - "table1,,1.00000000000000000000000000000000.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 1, 2, 4)); + userMetricsList.add(createUserMetrics("BAR", 2, 3, 3)); + regionMetricsList.add(createRegionMetrics("table1,,1.00000000000000000000000000000000.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.1f, 100, 100, "2019-07-22 00:00:00")); - regionMetricsList.add(createRegionMetrics( - "table2,1,2.00000000000000000000000000000001.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,1,2.00000000000000000000000000000001.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.2f, 50, 200, "2019-07-22 00:00:01")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,3_0001.00000000000000000000000000000002.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,3_0001.00000000000000000000000000000002.", 300, + 150, 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.3f, 100, 300, "2019-07-22 00:00:02")); ServerName host1 = ServerName.valueOf("host1.apache.com", 1000, 1); - serverMetricsMap.put(host1, createServerMetrics(host1, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 100, - regionMetricsList, userMetricsList)); + serverMetricsMap.put(host1, createServerMetrics(host1, 100, new Size(100, Size.Unit.MEGABYTE), + new Size(200, Size.Unit.MEGABYTE), 100, 
regionMetricsList, userMetricsList)); // host2 regionMetricsList.clear(); userMetricsList.clear(); - userMetricsList.add(createUserMetrics("FOO",5,7, 3)); - userMetricsList.add(createUserMetrics("BAR",4,8, 4)); - regionMetricsList.add(createRegionMetrics( - "table1,1,4.00000000000000000000000000000003.", - 100, 50, 100, - new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + userMetricsList.add(createUserMetrics("FOO", 5, 7, 3)); + userMetricsList.add(createUserMetrics("BAR", 4, 8, 4)); + regionMetricsList.add(createRegionMetrics("table1,1,4.00000000000000000000000000000003.", 100, + 50, 100, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, new Size(100, Size.Unit.MEGABYTE), 0.4f, 50, 100, "2019-07-22 00:00:03")); - regionMetricsList.add(createRegionMetrics( - "table2,,5.00000000000000000000000000000004.", - 200, 100, 200, - new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + regionMetricsList.add(createRegionMetrics("table2,,5.00000000000000000000000000000004.", 200, + 100, 200, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, new Size(200, Size.Unit.MEGABYTE), 0.5f, 150, 200, "2019-07-22 00:00:04")); - regionMetricsList.add(createRegionMetrics( - "namespace:table3,,6.00000000000000000000000000000005.", - 300, 150, 300, - new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, - new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); + regionMetricsList + .add(createRegionMetrics("namespace:table3,,6.00000000000000000000000000000005.", 300, 150, + 300, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + new Size(300, Size.Unit.MEGABYTE), 0.6f, 200, 300, "2019-07-22 00:00:05")); ServerName host2 = ServerName.valueOf("host2.apache.com", 1001, 2); - serverMetricsMap.put(host2, createServerMetrics(host2, 200, - new Size(16, Size.Unit.GIGABYTE), new Size(32, Size.Unit.GIGABYTE), 200, - regionMetricsList, userMetricsList)); + serverMetricsMap.put(host2, createServerMetrics(host2, 200, new Size(16, Size.Unit.GIGABYTE), + new Size(32, Size.Unit.GIGABYTE), 200, regionMetricsList, userMetricsList)); ServerName host3 = ServerName.valueOf("host3.apache.com", 1002, 3); - return ClusterMetricsBuilder.newBuilder() - .setHBaseVersion("3.0.0-SNAPSHOT") - .setClusterId("01234567-89ab-cdef-0123-456789abcdef") - .setLiveServerMetrics(serverMetricsMap) - .setDeadServerNames(Collections.singletonList(host3)) - .setRegionsInTransition(Collections.singletonList( - new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) - .setStartKey(new byte [0]) - .setEndKey(new byte [0]) - .setOffline(true) - .setReplicaId(0) - .setRegionId(0) - .setSplit(false) - .build(), - RegionState.State.OFFLINE, host3))) - .build(); + return ClusterMetricsBuilder.newBuilder().setHBaseVersion("3.0.0-SNAPSHOT") + .setClusterId("01234567-89ab-cdef-0123-456789abcdef").setLiveServerMetrics(serverMetricsMap) + .setDeadServerNames(Collections.singletonList(host3)) + .setRegionsInTransition(Collections + .singletonList(new RegionState(RegionInfoBuilder.newBuilder(TableName.valueOf("table4")) + .setStartKey(new byte[0]).setEndKey(new byte[0]).setOffline(true).setReplicaId(0) + .setRegionId(0).setSplit(false).build(), RegionState.State.OFFLINE, host3))) + .build(); } private static UserMetrics createUserMetrics(String user, long readRequestCount, long writeRequestCount, long filteredReadRequestsCount) { - return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)).addClientMetris( 
- new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).addClientMetris( - new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, readRequestCount, - writeRequestCount, filteredReadRequestsCount)).build(); + return UserMetricsBuilder.newBuilder(Bytes.toBytes(user)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_A_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .addClientMetris(new UserMetricsBuilder.ClientMetricsImpl("CLIENT_B_" + user, + readRequestCount, writeRequestCount, filteredReadRequestsCount)) + .build(); } private static RegionMetrics createRegionMetrics(String regionName, long readRequestCount, - long filteredReadRequestCount, long writeRequestCount, Size storeFileSize, - Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality, - long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) { + long filteredReadRequestCount, long writeRequestCount, Size storeFileSize, + Size uncompressedStoreFileSize, int storeFileCount, Size memStoreSize, float locality, + long compactedCellCount, long compactingCellCount, String lastMajorCompactionTime) { FastDateFormat df = FastDateFormat.getInstance("yyyy-MM-dd HH:mm:ss"); try { return RegionMetricsBuilder.newBuilder(Bytes.toBytes(regionName)) - .setReadRequestCount(readRequestCount) - .setFilteredReadRequestCount(filteredReadRequestCount) - .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize) - .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount) - .setMemStoreSize(memStoreSize).setDataLocality(locality) - .setCompactedCellCount(compactedCellCount).setCompactingCellCount(compactingCellCount) - .setLastMajorCompactionTimestamp(df.parse(lastMajorCompactionTime).getTime()).build(); + .setReadRequestCount(readRequestCount) + .setFilteredReadRequestCount(filteredReadRequestCount) + .setWriteRequestCount(writeRequestCount).setStoreFileSize(storeFileSize) + .setUncompressedStoreFileSize(uncompressedStoreFileSize).setStoreFileCount(storeFileCount) + .setMemStoreSize(memStoreSize).setDataLocality(locality) + .setCompactedCellCount(compactedCellCount).setCompactingCellCount(compactingCellCount) + .setLastMajorCompactionTimestamp(df.parse(lastMajorCompactionTime).getTime()).build(); } catch (ParseException e) { throw new IllegalArgumentException(e); } } private static ServerMetrics createServerMetrics(ServerName serverName, long reportTimestamp, - Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond, - List regionMetricsList, List userMetricsList) { - - return ServerMetricsBuilder.newBuilder(serverName) - .setReportTimestamp(reportTimestamp) - .setUsedHeapSize(usedHeapSize) - .setMaxHeapSize(maxHeapSize) - .setRequestCountPerSecond(requestCountPerSecond) - .setRegionMetrics(regionMetricsList) - .setUserMetrics(userMetricsList).build(); + Size usedHeapSize, Size maxHeapSize, long requestCountPerSecond, + List regionMetricsList, List userMetricsList) { + + return ServerMetricsBuilder.newBuilder(serverName).setReportTimestamp(reportTimestamp) + .setUsedHeapSize(usedHeapSize).setMaxHeapSize(maxHeapSize) + .setRequestCountPerSecond(requestCountPerSecond).setRegionMetrics(regionMetricsList) + .setUserMetrics(userMetricsList).build(); } public static void assertRecordsInRegionMode(List records) { @@ -174,48 +151,44 @@ public static void assertRecordsInRegionMode(List records) { switch 
(record.get(Field.REGION_NAME).asString()) { case "table1,,1.00000000000000000000000000000000.": assertRecordInRegionMode(record, "default", "1", "", "table1", - "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, - "2019-07-22 00:00:00"); + "00000000000000000000000000000000", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.1f, "", 100L, 100L, 100f, "2019-07-22 00:00:00"); break; case "table1,1,4.00000000000000000000000000000003.": assertRecordInRegionMode(record, "default", "4", "", "table1", - "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, - new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, - "2019-07-22 00:00:03"); + "00000000000000000000000000000003", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(100, Size.Unit.MEGABYTE), new Size(200, Size.Unit.MEGABYTE), 1, + new Size(100, Size.Unit.MEGABYTE), 0.4f, "1", 100L, 50L, 50f, "2019-07-22 00:00:03"); break; case "table2,,5.00000000000000000000000000000004.": assertRecordInRegionMode(record, "default", "5", "", "table2", - "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, - "2019-07-22 00:00:04"); + "00000000000000000000000000000004", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.5f, "", 200L, 150L, 75f, "2019-07-22 00:00:04"); break; case "table2,1,2.00000000000000000000000000000001.": assertRecordInRegionMode(record, "default", "2", "", "table2", - "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, - new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, - "2019-07-22 00:00:01"); + "00000000000000000000000000000001", "host1:1000", "host1.apache.com,1000,1", 0L, 0L, 0L, + 0L, new Size(200, Size.Unit.MEGABYTE), new Size(400, Size.Unit.MEGABYTE), 2, + new Size(200, Size.Unit.MEGABYTE), 0.2f, "1", 200L, 50L, 25f, "2019-07-22 00:00:01"); break; case "namespace:table3,,6.00000000000000000000000000000005.": assertRecordInRegionMode(record, "namespace", "6", "", "table3", - "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000005", "host2:1001", "host2.apache.com,1001,2", 0L, 0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.6f, "", 300L, 200L, 66.66667f, "2019-07-22 00:00:05"); break; case "namespace:table3,,3_0001.00000000000000000000000000000002.": assertRecordInRegionMode(record, "namespace", "3", "1", "table3", - "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1",0L, - 0L, 0L, 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, + "00000000000000000000000000000002", "host1:1000", "host1.apache.com,1000,1", 0L, 
0L, 0L, + 0L, new Size(300, Size.Unit.MEGABYTE), new Size(600, Size.Unit.MEGABYTE), 3, new Size(300, Size.Unit.MEGABYTE), 0.3f, "", 300L, 100L, 33.333336f, "2019-07-22 00:00:02"); break; @@ -227,12 +200,12 @@ public static void assertRecordsInRegionMode(List records) { } private static void assertRecordInRegionMode(Record record, String namespace, String startCode, - String replicaId, String table, String region, String regionServer, String longRegionServer, - long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, float locality, String startKey, long compactingCellCount, - long compactedCellCount, float compactionProgress, String lastMajorCompactionTime) { + String replicaId, String table, String region, String regionServer, String longRegionServer, + long requestCountPerSecond, long readRequestCountPerSecond, + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, float locality, + String startKey, long compactingCellCount, long compactedCellCount, float compactionProgress, + String lastMajorCompactionTime) { assertThat(record.size(), is(22)); assertThat(record.get(Field.NAMESPACE).asString(), is(namespace)); assertThat(record.get(Field.START_CODE).asString(), is(startCode)); @@ -241,8 +214,7 @@ private static void assertRecordInRegionMode(Record record, String namespace, St assertThat(record.get(Field.REGION).asString(), is(region)); assertThat(record.get(Field.REGION_SERVER).asString(), is(regionServer)); assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -285,12 +257,11 @@ public static void assertRecordsInNamespaceMode(List records) { } private static void assertRecordInNamespaceMode(Record record, long requestCountPerSecond, - long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, - long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, - int numStoreFiles, Size memStoreSize, int regionCount) { + long readRequestCountPerSecond, long filteredReadRequestCountPerSecond, + long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, + int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(10)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -339,7 +310,7 @@ public static void assertRecordsInUserMode(List records) { for (Record record : records) { String user = record.get(Field.USER).asString(); switch (user) { - //readRequestPerSecond and writeRequestPerSecond will be zero + // readRequestPerSecond and writeRequestPerSecond will be zero // because there is no change or new metrics during 
refresh case "FOO": assertRecordInUserMode(record, 0L, 0L, 0L); @@ -358,8 +329,8 @@ public static void assertRecordsInClientMode(List records) { for (Record record : records) { String client = record.get(Field.CLIENT).asString(); switch (client) { - //readRequestPerSecond and writeRequestPerSecond will be zero - // because there is no change or new metrics during refresh + // readRequestPerSecond and writeRequestPerSecond will be zero + // because there is no change or new metrics during refresh case "CLIENT_A_FOO": assertRecordInClientMode(record, 0L, 0L, 0L); break; @@ -382,11 +353,11 @@ private static void assertRecordInUserMode(Record record, long readRequestCountP long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.CLIENT_COUNT).asInt(), is(2)); } @@ -394,11 +365,11 @@ private static void assertRecordInClientMode(Record record, long readRequestCoun long writeCountRequestPerSecond, long filteredReadRequestsCount) { assertThat(record.size(), is(6)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(readRequestCountPerSecond)); + is(readRequestCountPerSecond)); assertThat(record.get(Field.WRITE_REQUEST_COUNT_PER_SECOND).asLong(), - is(writeCountRequestPerSecond)); + is(writeCountRequestPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), - is(filteredReadRequestsCount)); + is(filteredReadRequestsCount)); assertThat(record.get(Field.USER_COUNT).asInt(), is(1)); } @@ -407,8 +378,7 @@ private static void assertRecordInTableMode(Record record, long requestCountPerS long writeCountRequestPerSecond, Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount) { assertThat(record.size(), is(11)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - is(requestCountPerSecond)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), @@ -449,15 +419,13 @@ public static void assertRecordsInRegionServerMode(List records) { } private static void assertRecordInRegionServerMode(Record record, String longRegionServer, - long requestCountPerSecond, long readRequestCountPerSecond, - long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, - Size storeFileSize, Size uncompressedStoreFileSize, int numStoreFiles, - Size memStoreSize, int regionCount, Size usedHeapSize, Size maxHeapSize) { + long requestCountPerSecond, long readRequestCountPerSecond, + long filteredReadRequestCountPerSecond, long writeCountRequestPerSecond, Size storeFileSize, + Size uncompressedStoreFileSize, int numStoreFiles, Size memStoreSize, int regionCount, + Size usedHeapSize, Size maxHeapSize) { assertThat(record.size(), is(13)); - assertThat(record.get(Field.LONG_REGION_SERVER).asString(), - is(longRegionServer)); - assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), - 
is(requestCountPerSecond)); + assertThat(record.get(Field.LONG_REGION_SERVER).asString(), is(longRegionServer)); + assertThat(record.get(Field.REQUEST_COUNT_PER_SECOND).asLong(), is(requestCountPerSecond)); assertThat(record.get(Field.READ_REQUEST_COUNT_PER_SECOND).asLong(), is(readRequestCountPerSecond)); assertThat(record.get(Field.FILTERED_READ_REQUEST_COUNT_PER_SECOND).asLong(), diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java index dcbdb6b9b8ab..f71125ff10e4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/field/TestFieldValue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,13 +28,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestFieldValue { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFieldValue.class); + HBaseClassTestRule.forClass(TestFieldValue.class); @Test public void testParseAndAsSomethingMethod() { @@ -101,7 +100,7 @@ public void testParseAndAsSomethingMethod() { // Size FieldValue sizeFieldValue = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("100.0MB")); assertThat(sizeFieldValue.asSize(), is(new Size(100, Size.Unit.MEGABYTE))); @@ -122,8 +121,7 @@ public void testParseAndAsSomethingMethod() { } // Percent - FieldValue percentFieldValue = - new FieldValue(100f, FieldValueType.PERCENT); + FieldValue percentFieldValue = new FieldValue(100f, FieldValueType.PERCENT); assertThat(percentFieldValue.asString(), is("100.00%")); assertThat(percentFieldValue.asFloat(), is(100f)); @@ -184,11 +182,11 @@ public void testCompareTo() { // Size FieldValue size100MBFieldValue = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); FieldValue size100MBFieldValue2 = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); FieldValue size200MBFieldValue = - new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(size100MBFieldValue.compareTo(size100MBFieldValue2), is(0)); assertThat(size200MBFieldValue.compareTo(size100MBFieldValue), is(1)); @@ -228,9 +226,9 @@ public void testPlus() { // Size FieldValue sizeFieldValue = - new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(100, Size.Unit.MEGABYTE), FieldValueType.SIZE); FieldValue sizeFieldValue2 = - new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(200, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.plus(sizeFieldValue2).asString(), is("300.0MB")); assertThat(sizeFieldValue.plus(sizeFieldValue2).asSize(), is(new Size(300, Size.Unit.MEGABYTE))); @@ -255,44 +253,35 @@ public void testCompareToIgnoreCase() { @Test public void testOptimizeSize() { - FieldValue sizeFieldValue = - new 
FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); + FieldValue sizeFieldValue = new FieldValue(new Size(1, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0B")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.BYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0KB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0MB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.KILOBYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0GB")); sizeFieldValue = - new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE); + new FieldValue(new Size(2 * 1024 * 1024, Size.Unit.MEGABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0TB")); - sizeFieldValue = - new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(2 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("2.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024 * 1024, Size.Unit.TERABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); - sizeFieldValue = - new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1.0PB")); - sizeFieldValue = - new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); + sizeFieldValue = new FieldValue(new Size(1024, Size.Unit.PETABYTE), FieldValueType.SIZE); assertThat(sizeFieldValue.asString(), is("1024.0PB")); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java index 4f0864838532..2a958cc8a857 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestClientMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -33,18 +33,22 @@ @Category(SmallTests.class) public class TestClientMode extends TestModeBase { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestClientMode.class); - @Override protected Mode getMode() { + @Override + protected Mode getMode() { return Mode.CLIENT; } - @Override protected void assertRecords(List records) { + @Override + protected void assertRecords(List records) { TestUtils.assertRecordsInClientMode(records); } - @Override protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { + @Override + protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo) { assertThat(drillDownInfo.getNextMode(), is(Mode.USER)); assertThat(drillDownInfo.getInitialFilters().size(), is(1)); String client = currentRecord.get(Field.CLIENT).asString(); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java index a52b332265b0..2d29fc414605 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestModeBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,23 +22,21 @@ import org.apache.hadoop.hbase.hbtop.TestUtils; import org.junit.Test; - public abstract class TestModeBase { @Test public void testGetRecords() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); assertRecords(records); } protected abstract Mode getMode(); + protected abstract void assertRecords(List records); @Test public void testDrillDown() { - List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), - null); + List records = getMode().getRecords(TestUtils.createDummyClusterMetrics(), null); for (Record record : records) { assertDrillDown(record, getMode().drillDown(record)); } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java index 6c498e94eb1d..3a40401b7721 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestNamespaceMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -30,13 +30,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestNamespaceMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestNamespaceMode.class); + HBaseClassTestRule.forClass(TestNamespaceMode.class); @Override protected Mode getMode() { @@ -59,8 +58,7 @@ protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo break; case "namespace": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); break; default: diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java index b705531475f3..8480ed946aed 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,13 +29,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionMode.class); + HBaseClassTestRule.forClass(TestRegionMode.class); @Override protected Mode getMode() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java index cbfc7283fc64..9065ee5da2bf 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRegionServerMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,13 +30,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRegionServerMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRegionServerMode.class); + HBaseClassTestRule.forClass(TestRegionServerMode.class); @Override protected Mode getMode() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java index a73d54ea6bb9..705687d9146d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestRequestCountPerSecond.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,12 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestRequestCountPerSecond { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRequestCountPerSecond.class); + HBaseClassTestRule.forClass(TestRequestCountPerSecond.class); @Test public void test() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java index f718304671c4..574d9acb1c9f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestTableMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,13 +30,12 @@ import org.junit.ClassRule; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestTableMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableMode.class); + HBaseClassTestRule.forClass(TestTableMode.class); @Override protected Mode getMode() { @@ -68,8 +67,7 @@ protected void assertDrillDown(Record currentRecord, DrillDownInfo drillDownInfo break; case "namespace:table3": - assertThat(drillDownInfo.getInitialFilters().get(0).toString(), - is("NAMESPACE==namespace")); + assertThat(drillDownInfo.getInitialFilters().get(0).toString(), is("NAMESPACE==namespace")); assertThat(drillDownInfo.getInitialFilters().get(1).toString(), is("TABLE==table3")); break; diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java index f094c85f5481..05a2b5a8ad00 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/mode/TestUserMode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestUserMode extends TestModeBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestUserMode.class); + HBaseClassTestRule.forClass(TestUserMode.class); @Override protected Mode getMode() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java index cbf740430b0a..9c41595e2625 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/field/TestFieldScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -44,14 +44,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFieldScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFieldScreenPresenter.class); + HBaseClassTestRule.forClass(TestFieldScreenPresenter.class); @Mock private FieldScreenView fieldScreenView; @@ -71,17 +70,15 @@ public class TestFieldScreenPresenter { @Before public void setup() { Field sortField = Mode.REGION.getDefaultSortField(); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - fieldDisplayMap = Mode.REGION.getFieldInfos().stream() - .collect(() -> new EnumMap<>(Field.class), - (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> {}); + fieldDisplayMap = Mode.REGION.getFieldInfos().stream().collect(() -> new EnumMap<>(Field.class), + (r, fi) -> r.put(fi.getField(), fi.isDisplayByDefault()), (r1, r2) -> { + }); - fieldScreenPresenter = - new FieldScreenPresenter(fieldScreenView, sortField, fields, fieldDisplayMap, resultListener, - topScreenView); + fieldScreenPresenter = new FieldScreenPresenter(fieldScreenView, sortField, fields, + fieldDisplayMap, resultListener, topScreenView); for (int i = 0; i < fields.size(); i++) { Field field = fields.get(i); @@ -122,8 +119,8 @@ public void testChangeSortField() { inOrder.verify(fieldScreenView).showScreenDescription(eq("LRS")); inOrder.verify(fieldScreenView).showScreenDescription(eq("#READ/S")); inOrder.verify(fieldScreenView).showScreenDescription(eq(fields.get(0).getHeader())); - inOrder.verify(fieldScreenView).showScreenDescription( - eq(fields.get(fields.size() - 1).getHeader())); + inOrder.verify(fieldScreenView) + .showScreenDescription(eq(fields.get(fields.size() - 1).getHeader())); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java index 245bf615e731..4d2e9a092674 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/help/TestHelpScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,14 +34,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestHelpScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestHelpScreenPresenter.class); + HBaseClassTestRule.forClass(TestHelpScreenPresenter.class); private static final long TEST_REFRESH_DELAY = 5; @@ -55,8 +54,8 @@ public class TestHelpScreenPresenter { @Before public void setup() { - helpScreenPresenter = new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, - topScreenView); + helpScreenPresenter = + new HelpScreenPresenter(helpScreenView, TEST_REFRESH_DELAY, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java index 1b7e12a6240f..d077792a1de1 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/mode/TestModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,14 +37,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestModeScreenPresenter.class); @Mock private ModeScreenView modeScreenView; @@ -69,7 +68,7 @@ public void testInit() { int modeDescriptionMaxLength = Mode.REGION_SERVER.getDescription().length(); verify(modeScreenView).showModeScreen(eq(Mode.REGION), eq(Arrays.asList(Mode.values())), - eq(Mode.REGION.ordinal()) , eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); + eq(Mode.REGION.ordinal()), eq(modeHeaderMaxLength), eq(modeDescriptionMaxLength)); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java index 414b5b0702c5..48aa7af7680d 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestFilterDisplayModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,14 +39,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestFilterDisplayModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestFilterDisplayModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestFilterDisplayModeScreenPresenter.class); @Mock private FilterDisplayModeScreenView filterDisplayModeScreenView; @@ -58,24 +57,23 @@ public class TestFilterDisplayModeScreenPresenter { @Before public void setup() { - List fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + List fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); - List filters = new ArrayList<>(); + List filters = new ArrayList<>(); filters.add(RecordFilter.parse("NAMESPACE==namespace", fields, true)); filters.add(RecordFilter.parse("TABLE==table", fields, true)); - filterDisplayModeScreenPresenter = new FilterDisplayModeScreenPresenter( - filterDisplayModeScreenView, filters, topScreenView); + filterDisplayModeScreenPresenter = + new FilterDisplayModeScreenPresenter(filterDisplayModeScreenView, filters, topScreenView); } @Test public void testInit() { filterDisplayModeScreenPresenter.init(); - verify(filterDisplayModeScreenView).showFilters(argThat(filters -> filters.size() == 2 - && filters.get(0).toString().equals("NAMESPACE==namespace") - && filters.get(1).toString().equals("TABLE==table"))); + verify(filterDisplayModeScreenView).showFilters(argThat( + filters -> filters.size() == 2 && filters.get(0).toString().equals("NAMESPACE==namespace") + && filters.get(1).toString().equals("TABLE==table"))); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java index b5e9bb9f3ba6..1044b116bc8e 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestInputModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,14 +40,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestInputModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestInputModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestInputModeScreenPresenter.class); private static final String TEST_INPUT_MESSAGE = "test input message"; @@ -68,8 +67,8 @@ public void setup() { histories.add("history1"); histories.add("history2"); - inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, - TEST_INPUT_MESSAGE, histories, resultListener); + inputModeScreenPresenter = new InputModeScreenPresenter(inputModeScreenView, TEST_INPUT_MESSAGE, + histories, resultListener); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java index 0acd79c56d2d..018d7d03252f 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestMessageModeScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,14 +32,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestMessageModeScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestMessageModeScreenPresenter.class); + HBaseClassTestRule.forClass(TestMessageModeScreenPresenter.class); private static final String TEST_MESSAGE = "test message"; @@ -53,8 +52,8 @@ public class TestMessageModeScreenPresenter { @Before public void setup() { - messageModeScreenPresenter = new MessageModeScreenPresenter(messageModeScreenView, - TEST_MESSAGE, topScreenView); + messageModeScreenPresenter = + new MessageModeScreenPresenter(messageModeScreenView, TEST_MESSAGE, topScreenView); } @Test diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java index e0c09dfe1673..4b55cb2e787b 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestPaging.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -26,13 +26,11 @@ import org.junit.Test; import org.junit.experimental.categories.Category; - @Category(SmallTests.class) public class TestPaging { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestPaging.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestPaging.class); @Test public void testArrowUpAndArrowDown() { @@ -292,7 +290,7 @@ public void testWhenChangingRecordsSizeDynamically() { } private void assertPaging(Paging paging, int currentPosition, int pageStartPosition, - int pageEndPosition) { + int pageEndPosition) { assertThat(paging.getCurrentPosition(), is(currentPosition)); assertThat(paging.getPageStartPosition(), is(pageStartPosition)); assertThat(paging.getPageEndPosition(), is(pageEndPosition)); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java index 44a8878407a0..a57a15db0aff 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenModel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,14 +44,13 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenModel { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTopScreenModel.class); + HBaseClassTestRule.forClass(TestTopScreenModel.class); @Mock private Admin admin; @@ -65,9 +64,8 @@ public void setup() throws IOException { when(admin.getClusterMetrics()).thenReturn(TestUtils.createDummyClusterMetrics()); topScreenModel = new TopScreenModel(admin, Mode.REGION, null, null, null, null); - fields = Mode.REGION.getFieldInfos().stream() - .map(FieldInfo::getField) - .collect(Collectors.toList()); + fields = + Mode.REGION.getFieldInfos().stream().map(FieldInfo::getField).collect(Collectors.toList()); } @Test @@ -172,9 +170,9 @@ public void testSwitchMode() { assertThat(topScreenModel.getCurrentMode(), is(Mode.TABLE)); // Test for initialFilters - List initialFilters = Arrays.asList( - RecordFilter.parse("TABLE==table1", fields, true), - RecordFilter.parse("TABLE==table2", fields, true)); + List initialFilters = + Arrays.asList(RecordFilter.parse("TABLE==table1", fields, true), + RecordFilter.parse("TABLE==table2", fields, true)); topScreenModel.switchMode(Mode.TABLE, false, initialFilters); diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java index d218dd52950d..c10413da9bbb 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/screen/top/TestTopScreenPresenter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,39 +42,28 @@ import org.mockito.Mock; import org.mockito.runners.MockitoJUnitRunner; - @Category(SmallTests.class) @RunWith(MockitoJUnitRunner.class) public class TestTopScreenPresenter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTopScreenPresenter.class); + HBaseClassTestRule.forClass(TestTopScreenPresenter.class); private static final List TEST_FIELD_INFOS = Arrays.asList( - new FieldInfo(Field.REGION, 10, true), - new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), - new FieldInfo(Field.LOCALITY, 10, true) - ); + new FieldInfo(Field.REGION, 10, true), new FieldInfo(Field.REQUEST_COUNT_PER_SECOND, 10, true), + new FieldInfo(Field.LOCALITY, 10, true)); private static final List TEST_RECORDS = Arrays.asList( - Record.ofEntries( - entry(Field.REGION, "region1"), - entry(Field.REQUEST_COUNT_PER_SECOND, 1L), + Record.ofEntries(entry(Field.REGION, "region1"), entry(Field.REQUEST_COUNT_PER_SECOND, 1L), entry(Field.LOCALITY, 0.3f)), - Record.ofEntries( - entry(Field.REGION, "region2"), - entry(Field.REQUEST_COUNT_PER_SECOND, 2L), + Record.ofEntries(entry(Field.REGION, "region2"), entry(Field.REQUEST_COUNT_PER_SECOND, 2L), entry(Field.LOCALITY, 0.2f)), - Record.ofEntries( - entry(Field.REGION, "region3"), - entry(Field.REQUEST_COUNT_PER_SECOND, 3L), - entry(Field.LOCALITY, 0.1f)) - ); + Record.ofEntries(entry(Field.REGION, "region3"), entry(Field.REQUEST_COUNT_PER_SECOND, 3L), + entry(Field.LOCALITY, 0.1f))); - private static final Summary TEST_SUMMARY = new Summary( - "00:00:01", "3.0.0-SNAPSHOT", "01234567-89ab-cdef-0123-456789abcdef", - 3, 2, 1, 6, 1, 3.0, 300); + private static final Summary TEST_SUMMARY = new Summary("00:00:01", "3.0.0-SNAPSHOT", + "01234567-89ab-cdef-0123-456789abcdef", 3, 2, 1, 6, 1, 3.0, 300); @Mock private TopScreenView topScreenView; @@ -90,13 +79,13 @@ public void setup() { when(topScreenView.getPageSize()).thenReturn(100); when(topScreenModel.getFieldInfos()).thenReturn(TEST_FIELD_INFOS); - when(topScreenModel.getFields()).thenReturn(TEST_FIELD_INFOS.stream() - .map(FieldInfo::getField).collect(Collectors.toList())); + when(topScreenModel.getFields()).thenReturn( + TEST_FIELD_INFOS.stream().map(FieldInfo::getField).collect(Collectors.toList())); when(topScreenModel.getRecords()).thenReturn(TEST_RECORDS); when(topScreenModel.getSummary()).thenReturn(TEST_SUMMARY); - topScreenPresenter = new TopScreenPresenter(topScreenView, 3000, topScreenModel, - null, Long.MAX_VALUE); + topScreenPresenter = + new TopScreenPresenter(topScreenView, 3000, topScreenModel, null, Long.MAX_VALUE); } @Test @@ -104,8 +93,8 @@ public void testRefresh() { topScreenPresenter.init(); topScreenPresenter.refresh(true); - verify(topScreenView).showTopScreen(argThat(this::assertSummary), - argThat(this::assertHeaders), argThat(this::assertRecords), + verify(topScreenView).showTopScreen(argThat(this::assertSummary), argThat(this::assertHeaders), + argThat(this::assertRecords), argThat(selectedRecord -> assertSelectedRecord(selectedRecord, 0))); } @@ -199,21 +188,20 @@ private void verifyHorizontalScrolling(InOrder inOrder, int expectedHeaderCount) private boolean assertSummary(Summary actual) { return actual.getCurrentTime().equals(TEST_SUMMARY.getCurrentTime()) - && actual.getVersion().equals(TEST_SUMMARY.getVersion()) - && actual.getClusterId().equals(TEST_SUMMARY.getClusterId()) - && actual.getServers() == TEST_SUMMARY.getServers() - && 
actual.getLiveServers() == TEST_SUMMARY.getLiveServers() - && actual.getDeadServers() == TEST_SUMMARY.getDeadServers() - && actual.getRegionCount() == TEST_SUMMARY.getRegionCount() - && actual.getRitCount() == TEST_SUMMARY.getRitCount() - && actual.getAverageLoad() == TEST_SUMMARY.getAverageLoad() - && actual.getAggregateRequestPerSecond() == TEST_SUMMARY.getAggregateRequestPerSecond(); + && actual.getVersion().equals(TEST_SUMMARY.getVersion()) + && actual.getClusterId().equals(TEST_SUMMARY.getClusterId()) + && actual.getServers() == TEST_SUMMARY.getServers() + && actual.getLiveServers() == TEST_SUMMARY.getLiveServers() + && actual.getDeadServers() == TEST_SUMMARY.getDeadServers() + && actual.getRegionCount() == TEST_SUMMARY.getRegionCount() + && actual.getRitCount() == TEST_SUMMARY.getRitCount() + && actual.getAverageLoad() == TEST_SUMMARY.getAverageLoad() + && actual.getAggregateRequestPerSecond() == TEST_SUMMARY.getAggregateRequestPerSecond(); } private boolean assertHeaders(List
<Header> actual) { - List<Header>
expected = - TEST_FIELD_INFOS.stream().map(fi -> new Header(fi.getField(), fi.getDefaultLength())) - .collect(Collectors.toList()); + List<Header>
    expected = TEST_FIELD_INFOS.stream() + .map(fi -> new Header(fi.getField(), fi.getDefaultLength())).collect(Collectors.toList()); if (actual.size() != expected.size()) { return false; @@ -250,8 +238,9 @@ private boolean assertSelectedRecord(Record actual, int expectedSelectedRecodeIn } private boolean assertRecord(Record actual, Record expected) { - return actual.get(Field.REGION).equals(expected.get(Field.REGION)) && actual - .get(Field.REQUEST_COUNT_PER_SECOND).equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) - && actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY)); + return actual.get(Field.REGION).equals(expected.get(Field.REGION)) + && actual.get(Field.REQUEST_COUNT_PER_SECOND) + .equals(expected.get(Field.REQUEST_COUNT_PER_SECOND)) + && actual.get(Field.LOCALITY).equals(expected.get(Field.LOCALITY)); } } diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java index 304c92b8497e..3458e7ee31b4 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestCursor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestCursor { private TestCursor() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java index ebfe56981c49..6295cd0166aa 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestKeyPress.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,11 +18,9 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; - public final class TestKeyPress { private TestKeyPress() { diff --git a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java index 212395fecaf5..a6a79c4e67de 100644 --- a/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java +++ b/hbase-hbtop/src/test/java/org/apache/hadoop/hbase/hbtop/terminal/impl/TestTerminalPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,12 +18,10 @@ package org.apache.hadoop.hbase.hbtop.terminal.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.hbtop.terminal.KeyPress; import org.apache.hadoop.hbase.hbtop.terminal.Terminal; import org.apache.hadoop.hbase.hbtop.terminal.TerminalPrinter; - public final class TestTerminalPrinter { private TestTerminalPrinter() { @@ -38,8 +36,8 @@ public static void main(String[] args) throws Exception { printer.print("Normal string").endOfLine(); printer.startHighlight().print("Highlighted string").stopHighlight().endOfLine(); printer.startBold().print("Bold string").stopBold().endOfLine(); - printer.startHighlight().startBold().print("Highlighted bold string") - .stopBold().stopHighlight().endOfLine(); + printer.startHighlight().startBold().print("Highlighted bold string").stopBold() + .stopHighlight().endOfLine(); printer.endOfLine(); printer.print("Press any key to finish").endOfLine(); diff --git a/hbase-http/pom.xml b/hbase-http/pom.xml index 6ad704984194..04063d15ab0a 100644 --- a/hbase-http/pom.xml +++ b/hbase-http/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-http Apache HBase - HTTP HTTP functionality for HBase Servers - - - - - - - src/test/resources/META-INF/ - META-INF/ - - NOTICE - - true - - - src/test/resources - - **/** - - - - - - - org.apache.maven.plugins - maven-remote-resources-plugin - - - default - - false - - ${build.year} - ${license.debug.print.included} - ${license.bundles.dependencies} - ${license.bundles.jquery} - ${license.bundles.vega} - ${license.bundles.logo} - ${license.bundles.bootstrap} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - ${project.groupId}:hbase-resource-bundle:${project.version} - - - supplemental-models.xml - - - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - package - - jar - test-jar - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -267,6 +167,106 @@ test + + + + + + + META-INF/ + true + src/test/resources/META-INF/ + + NOTICE + + + + src/test/resources + + **/** + + + + + + + org.apache.maven.plugins + maven-remote-resources-plugin + + + default + + false + + ${build.year} + ${license.debug.print.included} + ${license.bundles.dependencies} + ${license.bundles.jquery} + ${license.bundles.vega} + ${license.bundles.logo} + ${license.bundles.bootstrap} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + ${project.groupId}:hbase-resource-bundle:${project.version} + + + supplemental-models.xml + + + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + jar + test-jar + + package + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + build-with-jdk11 @@ -291,10 +291,10 @@ license-javadocs - prepare-package copy-resources + prepare-package ${project.build.directory}/apidocs @@ -338,8 +338,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -350,6 +351,7 @@ org.apache.hadoop hadoop-minicluster + test com.google.guava @@ -360,7 +362,6 @@ zookeeper - test 
org.apache.hadoop @@ -379,10 +380,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources @@ -506,7 +507,7 @@ - + diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java index 215ff37e3bf5..833207793352 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedFilter.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -27,7 +26,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; @@ -38,11 +36,12 @@ public class AdminAuthorizedFilter implements Filter { private Configuration conf; private AccessControlList adminsAcl; - @Override public void init(FilterConfig filterConfig) throws ServletException { - adminsAcl = (AccessControlList) filterConfig.getServletContext().getAttribute( - HttpServer.ADMINS_ACL); - conf = (Configuration) filterConfig.getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + @Override + public void init(FilterConfig filterConfig) throws ServletException { + adminsAcl = + (AccessControlList) filterConfig.getServletContext().getAttribute(HttpServer.ADMINS_ACL); + conf = (Configuration) filterConfig.getServletContext() + .getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); } @Override @@ -61,5 +60,7 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha chain.doFilter(request, response); } - @Override public void destroy() {} + @Override + public void destroy() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java index 10156f43b445..c1dab3027b08 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/AdminAuthorizedServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ public class AdminAuthorizedServlet extends DefaultServlet { @Override protected void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { // Do the authorization - if (HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { // Authorization is done. Just call super. 
super.doGet(request, response); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java index 0f0c7150c417..5dce5960d071 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ClickjackingPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -28,10 +27,8 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -46,7 +43,7 @@ public void init(FilterConfig filterConfig) throws ServletException { @Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.addHeader("X-Frame-Options", filterConfig.getInitParameter("xframeoptions")); chain.doFilter(req, res); @@ -58,8 +55,8 @@ public void destroy() { public static Map getDefaultParameters(Configuration conf) { Map params = new HashMap<>(); - params.put("xframeoptions", conf.get("hbase.http.filter.xframeoptions.mode", - DEFAULT_XFRAMEOPTIONS)); + params.put("xframeoptions", + conf.get("hbase.http.filter.xframeoptions.mode", DEFAULT_XFRAMEOPTIONS)); return params; } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java index 5869ce3f92e8..ed41bab54cd5 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,9 +32,10 @@ public interface FilterContainer { * @param parameters a map from parameter names to initial values */ void addFilter(String name, String classname, Map parameters); + /** - * Add a global filter to the container - This global filter will be - * applied to all available web contexts. + * Add a global filter to the container - This global filter will be applied to all available web + * contexts. 
* @param name filter name * @param classname filter class name * @param parameters a map from parameter names to initial values diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java index 7e8595e7d043..917fe24291f0 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/FilterInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java index 678b397949f5..e775ba895fc9 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,12 +41,12 @@ public final class HtmlQuoting { * @return does the string contain any of the active html characters? */ public static boolean needsQuoting(byte[] data, int off, int len) { - if (off+len > data.length) { - throw new IllegalStateException("off+len=" + off+len + " should be lower" - + " than data length=" + data.length); + if (off + len > data.length) { + throw new IllegalStateException( + "off+len=" + off + len + " should be lower" + " than data length=" + data.length); } - for(int i=off; i< off+len; ++i) { - switch(data[i]) { + for (int i = off; i < off + len; ++i) { + switch (data[i]) { case '&': case '<': case '>': @@ -70,20 +70,19 @@ public static boolean needsQuoting(String str) { return false; } byte[] bytes = str.getBytes(); - return needsQuoting(bytes, 0 , bytes.length); + return needsQuoting(bytes, 0, bytes.length); } /** - * Quote all of the active HTML characters in the given string as they - * are added to the buffer. + * Quote all of the active HTML characters in the given string as they are added to the buffer. 
* @param output the stream to write the output to * @param buffer the byte array to take the characters from * @param off the index of the first byte to quote * @param len the number of bytes to quote */ public static void quoteHtmlChars(OutputStream output, byte[] buffer, int off, int len) - throws IOException { - for(int i=off; i < off+len; i++) { + throws IOException { + for (int i = off; i < off + len; i++) { switch (buffer[i]) { case '&': output.write(ampBytes); @@ -138,6 +137,7 @@ public static String quoteHtmlChars(String item) { public static OutputStream quoteOutputStream(final OutputStream out) { return new OutputStream() { private byte[] data = new byte[1]; + @Override public void write(byte[] data, int off, int len) throws IOException { quoteHtmlChars(out, data, off, len); @@ -196,12 +196,11 @@ public static String unquoteHtmlChars(String item) { buffer.append('"'); next += 6; } else { - int end = item.indexOf(';', next)+1; + int end = item.indexOf(';', next) + 1; if (end == 0) { end = len; } - throw new IllegalArgumentException("Bad HTML quoting for " + - item.substring(next,end)); + throw new IllegalArgumentException("Bad HTML quoting for " + item.substring(next, end)); } posn = next; next = item.indexOf('&', posn); @@ -214,15 +213,16 @@ public static void main(String[] args) { if (args.length == 0) { throw new IllegalArgumentException("Please provide some arguments"); } - for(String arg:args) { + for (String arg : args) { System.out.println("Original: " + arg); String quoted = quoteHtmlChars(arg); - System.out.println("Quoted: "+ quoted); + System.out.println("Quoted: " + quoted); String unquoted = unquoteHtmlChars(quoted); System.out.println("Unquoted: " + unquoted); System.out.println(); } } - private HtmlQuoting() {} + private HtmlQuoting() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java index 52c9133dcf63..07de1f7d2963 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpConfig.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -29,10 +28,9 @@ @InterfaceStability.Unstable public class HttpConfig { private Policy policy; + public enum Policy { - HTTP_ONLY, - HTTPS_ONLY, - HTTP_AND_HTTPS; + HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS; public Policy fromString(String value) { if (HTTPS_ONLY.name().equalsIgnoreCase(value)) { @@ -53,8 +51,7 @@ public boolean isHttpsEnabled() { } public HttpConfig(final Configuration conf) { - boolean sslEnabled = conf.getBoolean( - ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, + boolean sslEnabled = conf.getBoolean(ServerConfigurationKeys.HBASE_SSL_ENABLED_KEY, ServerConfigurationKeys.HBASE_SSL_ENABLED_DEFAULT); policy = sslEnabled ? 
Policy.HTTPS_ONLY : Policy.HTTP_ONLY; if (sslEnabled) { diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java index d3e8005eb9c0..017c45b4f590 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpRequestLog.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ public final class HttpRequestLog { private static final ImmutableMap SERVER_TO_COMPONENT = - ImmutableMap.of("master", "master", "region", "regionserver"); + ImmutableMap.of("master", "master", "region", "regionserver"); public static RequestLog getRequestLog(String name) { String lookup = SERVER_TO_COMPONENT.get(name); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java index f8c04bac9715..a8ae50aa14d6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -68,6 +68,7 @@ import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion; @@ -96,12 +97,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/logs/" -> points to the log directory - * "/static/" -> points to common static files (src/webapps/static) - * "/" -> the jsp server code from (src/webapps/<name>) + * Create a Jetty embedded server to answer http requests. The primary goal is to serve up status + * information for the server. 
There are three contexts: "/logs/" -> points to the log directory + * "/static/" -> points to common static files (src/webapps/static) "/" -> the jsp server code + * from (src/webapps/<name>) */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -111,14 +110,12 @@ public class HttpServer implements FilterContainer { private static final int DEFAULT_MAX_HEADER_SIZE = 64 * 1024; // 64K - static final String FILTER_INITIALIZERS_PROPERTY - = "hbase.http.filter.initializers"; + static final String FILTER_INITIALIZERS_PROPERTY = "hbase.http.filter.initializers"; static final String HTTP_MAX_THREADS = "hbase.http.max.threads"; public static final String HTTP_UI_AUTHENTICATION = "hbase.security.authentication.ui"; static final String HTTP_AUTHENTICATION_PREFIX = "hbase.security.authentication."; - static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX - + "spnego."; + static final String HTTP_SPNEGO_AUTHENTICATION_PREFIX = HTTP_AUTHENTICATION_PREFIX + "spnego."; static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX = "kerberos.principal"; public static final String HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY = HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX; @@ -128,12 +125,12 @@ public class HttpServer implements FilterContainer { static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX = "kerberos.name.rules"; public static final String HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY = HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_SUFFIX; - static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = "kerberos.proxyuser.enable"; + static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX = + "kerberos.proxyuser.enable"; public static final String HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY = HTTP_SPNEGO_AUTHENTICATION_PREFIX + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_SUFFIX; - public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; - static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = - "signature.secret.file"; + public static final boolean HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT = false; + static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX = "signature.secret.file"; public static final String HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY = HTTP_AUTHENTICATION_PREFIX + HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX; public static final String HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY = @@ -162,11 +159,11 @@ public class HttpServer implements FilterContainer { private static final class ListenerInfo { /** - * Boolean flag to determine whether the HTTP server should clean up the - * listener in stop(). + * Boolean flag to determine whether the HTTP server should clean up the listener in stop(). */ private final boolean isManaged; private final ServerConnector listener; + private ListenerInfo(boolean isManaged, ServerConnector listener) { this.isManaged = isManaged; this.listener = listener; @@ -241,12 +238,9 @@ public static class Builder { /** * Add an endpoint that the HTTP server should listen to. - * - * @param endpoint - * the endpoint of that the HTTP server should listen to. The - * scheme specifies the protocol (i.e. HTTP / HTTPS), the host - * specifies the binding address, and the port specifies the - * listening port. Unspecified or zero port means that the server + * @param endpoint the endpoint of that the HTTP server should listen to. 
The scheme specifies + * the protocol (i.e. HTTP / HTTPS), the host specifies the binding address, and the + * port specifies the listening port. Unspecified or zero port means that the server * can listen to any port. */ public Builder addEndpoint(URI endpoint) { @@ -255,9 +249,9 @@ public Builder addEndpoint(URI endpoint) { } /** - * Set the hostname of the http server. The host name is used to resolve the - * _HOST field in Kerberos principals. The hostname of the first listener - * will be used if the name is unspecified. + * Set the hostname of the http server. The host name is used to resolve the _HOST field in + * Kerberos principals. The hostname of the first listener will be used if the name is + * unspecified. */ public Builder hostName(String hostName) { this.hostName = hostName; @@ -284,8 +278,7 @@ public Builder keyPassword(String password) { } /** - * Specify whether the server should authorize the client in SSL - * connections. + * Specify whether the server should authorize the client in SSL connections. */ public Builder needsClientAuth(boolean value) { this.needsClientAuth = value; @@ -297,7 +290,7 @@ public Builder needsClientAuth(boolean value) { * @deprecated Since 0.99.0. Use {@link #setAppDir(String)} instead. */ @Deprecated - public Builder setName(String name){ + public Builder setName(String name) { this.name = name; return this; } @@ -307,7 +300,7 @@ public Builder setName(String name){ * @deprecated Since 0.99.0. Use {@link #addEndpoint(URI)} instead. */ @Deprecated - public Builder setBindAddress(String bindAddress){ + public Builder setBindAddress(String bindAddress) { this.bindAddress = bindAddress; return this; } @@ -393,7 +386,7 @@ public HttpServer build() throws IOException { try { endpoints.add(0, new URI("http", "", bindAddress, port, "", "", "")); } catch (URISyntaxException e) { - throw new HadoopIllegalArgumentException("Invalid endpoint: "+ e); + throw new HadoopIllegalArgumentException("Invalid endpoint: " + e); } } @@ -447,11 +440,11 @@ public HttpServer build() throws IOException { LOG.debug("Excluded SSL Cipher List:" + excludeCiphers); } - listener = new ServerConnector(server.webServer, new SslConnectionFactory(sslCtxFactory, - HttpVersion.HTTP_1_1.toString()), new HttpConnectionFactory(httpsConfig)); + listener = new ServerConnector(server.webServer, + new SslConnectionFactory(sslCtxFactory, HttpVersion.HTTP_1_1.toString()), + new HttpConnectionFactory(httpsConfig)); } else { - throw new HadoopIllegalArgumentException( - "unknown scheme for endpoint:" + ep); + throw new HadoopIllegalArgumentException("unknown scheme for endpoint:" + ep); } // default settings for connector @@ -482,90 +475,83 @@ public HttpServer build() throws IOException { */ @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort) - throws IOException { + throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } /** - * Create a status server on the given port. Allows you to specify the - * path specifications that this server will be serving so that they will be - * added to the filters properly. - * + * Create a status server on the given port. Allows you to specify the path specifications that + * this server will be serving so that they will be added to the filters properly. 
* @param name The name of the server * @param bindAddress The address for this server * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. * @param conf Configuration - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param pathSpecs Path specifications that this httpserver will be serving. These will be added + * to any filters. * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + String[] pathSpecs) throws IOException { this(name, bindAddress, port, findPort, conf, null, pathSpecs); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. * @param name The name of the server * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. + * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. * @param conf Configuration * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf) throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) + throws IOException { this(name, bindAddress, port, findPort, conf, null, null); } /** - * Creates a status server on the given port. The JSP scripts are taken - * from src/webapp<name>. - * + * Creates a status server on the given port. The JSP scripts are taken from + * src/webapp<name>. * @param name the name of the server * @param bindAddress the address for this server * @param port the port to use on the server * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port + * finds a free port * @param conf the configuration to use * @param adminsAcl {@link AccessControlList} of the admins * @throws IOException when creating the server fails * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl) - throws IOException { + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl) throws IOException { this(name, bindAddress, port, findPort, conf, adminsAcl, null); } /** - * Create a status server on the given port. - * The jsp scripts are taken from src/webapps/<name>. + * Create a status server on the given port. The jsp scripts are taken from + * src/webapps/<name>. * @param name The name of the server * @param bindAddress The address for this server * @param port The port to use on the server - * @param findPort whether the server should start at the given port and - * increment by 1 until it finds a free port. 
+ * @param findPort whether the server should start at the given port and increment by 1 until it + * finds a free port. * @param conf Configuration * @param adminsAcl {@link AccessControlList} of the admins - * @param pathSpecs Path specifications that this httpserver will be serving. - * These will be added to any filters. + * @param pathSpecs Path specifications that this httpserver will be serving. These will be added + * to any filters. * @deprecated Since 0.99.0 */ @Deprecated - public HttpServer(String name, String bindAddress, int port, - boolean findPort, Configuration conf, AccessControlList adminsAcl, - String[] pathSpecs) throws IOException { - this(new Builder().setName(name) - .addEndpoint(URI.create("http://" + bindAddress + ":" + port)) - .setFindPort(findPort).setConf(conf).setACL(adminsAcl) - .setPathSpec(pathSpecs)); + public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, + AccessControlList adminsAcl, String[] pathSpecs) throws IOException { + this(new Builder().setName(name).addEndpoint(URI.create("http://" + bindAddress + ":" + port)) + .setFindPort(findPort).setConf(conf).setACL(adminsAcl).setPathSpec(pathSpecs)); } private HttpServer(final Builder b) throws IOException { @@ -573,12 +559,11 @@ private HttpServer(final Builder b) throws IOException { this.logDir = b.logDir; final String appDir = getWebAppsPath(b.name); - int maxThreads = b.conf.getInt(HTTP_MAX_THREADS, 16); // If HTTP_MAX_THREADS is less than or equal to 0, QueueThreadPool() will use the // default value (currently 200). - QueuedThreadPool threadPool = maxThreads <= 0 ? new QueuedThreadPool() - : new QueuedThreadPool(maxThreads); + QueuedThreadPool threadPool = + maxThreads <= 0 ? new QueuedThreadPool() : new QueuedThreadPool(maxThreads); threadPool.setDaemon(true); this.webServer = new Server(threadPool); @@ -590,9 +575,8 @@ private HttpServer(final Builder b) throws IOException { this.webServer.setHandler(buildGzipHandler(this.webServer.getHandler())); } - private void initializeWebServer(String name, String hostName, - Configuration conf, String[] pathSpecs, HttpServer.Builder b) - throws FileNotFoundException, IOException { + private void initializeWebServer(String name, String hostName, Configuration conf, + String[] pathSpecs, HttpServer.Builder b) throws FileNotFoundException, IOException { Preconditions.checkNotNull(webAppContext); @@ -623,20 +607,18 @@ private void initializeWebServer(String name, String hostName, addGlobalFilter("safety", QuotingInputFilter.class.getName(), null); - addGlobalFilter("clickjackingprevention", - ClickjackingPreventionFilter.class.getName(), - ClickjackingPreventionFilter.getDefaultParameters(conf)); + addGlobalFilter("clickjackingprevention", ClickjackingPreventionFilter.class.getName(), + ClickjackingPreventionFilter.getDefaultParameters(conf)); HttpConfig httpConfig = new HttpConfig(conf); - addGlobalFilter("securityheaders", - SecurityHeadersFilter.class.getName(), - SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); + addGlobalFilter("securityheaders", SecurityHeadersFilter.class.getName(), + SecurityHeadersFilter.getDefaultParameters(conf, httpConfig.isSecure())); // But security needs to be enabled prior to adding the other servlets if (authenticationEnabled) { initSpnego(conf, hostName, b.usernameConfKey, b.keytabConfKey, b.kerberosNameRulesKey, - b.signatureSecretFileKey); + b.signatureSecretFileKey); } final FilterInitializer[] initializers = getFilterInitializers(conf); @@ -662,16 
+644,16 @@ private void addManagedListener(ServerConnector connector) { listeners.add(new ListenerInfo(true, connector)); } - private static WebAppContext createWebAppContext(String name, - Configuration conf, AccessControlList adminsAcl, final String appDir) { + private static WebAppContext createWebAppContext(String name, Configuration conf, + AccessControlList adminsAcl, final String appDir) { WebAppContext ctx = new WebAppContext(); ctx.setDisplayName(name); ctx.setContextPath("/"); ctx.setWar(appDir + "/" + name); ctx.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); // for org.apache.hadoop.metrics.MetricsServlet - ctx.getServletContext().setAttribute( - org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, conf); + ctx.getServletContext().setAttribute(org.apache.hadoop.http.HttpServer2.CONF_CONTEXT_ATTRIBUTE, + conf); ctx.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); addNoCacheFilter(ctx); return ctx; @@ -681,11 +663,12 @@ private static WebAppContext createWebAppContext(String name, * Construct and configure an instance of {@link GzipHandler}. With complex * multi-{@link WebAppContext} configurations, it's easiest to apply this handler directly to the * instance of {@link Server} near the end of its configuration, something like + * *
    * <pre>
    -   *    Server server = new Server();
    -   *    //...
    -   *    server.setHandler(buildGzipHandler(server.getHandler()));
    -   *    server.start();
    +   * Server server = new Server();
    +   * // ...
    +   * server.setHandler(buildGzipHandler(server.getHandler()));
    +   * server.start();
    * </pre>
    */ public static GzipHandler buildGzipHandler(final Handler wrapped) { @@ -696,7 +679,7 @@ public static GzipHandler buildGzipHandler(final Handler wrapped) { private static void addNoCacheFilter(WebAppContext ctxt) { defineFilter(ctxt, NO_CACHE_FILTER, NoCacheFilter.class.getName(), - Collections. emptyMap(), new String[] { "/*" }); + Collections. emptyMap(), new String[] { "/*" }); } /** Get an array of FilterConfiguration specified in the conf */ @@ -711,8 +694,8 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { } FilterInitializer[] initializers = new FilterInitializer[classes.length]; - for(int i = 0; i < classes.length; i++) { - initializers[i] = (FilterInitializer)ReflectionUtils.newInstance(classes[i]); + for (int i = 0; i < classes.length; i++) { + initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(classes[i]); } return initializers; } @@ -721,8 +704,8 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { * Add default apps. * @param appDir The application directory */ - protected void addDefaultApps(ContextHandlerCollection parent, - final String appDir, Configuration conf) { + protected void addDefaultApps(ContextHandlerCollection parent, final String appDir, + Configuration conf) { // set up the context for "/logs/" if "hadoop.log.dir" property is defined. String logDir = this.logDir; if (logDir == null) { @@ -733,12 +716,10 @@ protected void addDefaultApps(ContextHandlerCollection parent, logContext.addServlet(AdminAuthorizedServlet.class, "/*"); logContext.setResourceBase(logDir); - if (conf.getBoolean( - ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, - ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { + if (conf.getBoolean(ServerConfigurationKeys.HBASE_JETTY_LOGS_SERVE_ALIASES, + ServerConfigurationKeys.DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES)) { Map params = logContext.getInitParams(); - params.put( - "org.mortbay.jetty.servlet.Default.aliases", "true"); + params.put("org.mortbay.jetty.servlet.Default.aliases", "true"); } logContext.setDisplayName("logs"); setContextAttributes(logContext, conf); @@ -761,13 +742,13 @@ private void setContextAttributes(ServletContextHandler context, Configuration c /** * Add default servlets. */ - protected void addDefaultServlets( - ContextHandlerCollection contexts, Configuration conf) throws IOException { + protected void addDefaultServlets(ContextHandlerCollection contexts, Configuration conf) + throws IOException { // set up default servlets addPrivilegedServlet("stacks", "/stacks", StackServlet.class); addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class); - // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's - // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. + // Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's + // MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2. // Remove when we drop support for hbase on hadoop2.x. try { Class clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet"); @@ -796,14 +777,14 @@ protected void addDefaultServlets( genCtx.setDisplayName("prof-output-hbase"); } else { addUnprivilegedServlet("prof", "/prof", ProfileServlet.DisabledServlet.class); - LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + - "not specified. 
Disabling /prof endpoint."); + LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property " + + "not specified. Disabling /prof endpoint."); } } /** - * Set a value in the webapp context. These values are available to the jsp - * pages as "application.getAttribute(name)". + * Set a value in the webapp context. These values are available to the jsp pages as + * "application.getAttribute(name)". * @param name The name of the attribute * @param value The value of the attribute */ @@ -816,10 +797,8 @@ public void setAttribute(String name, Object value) { * @param packageName The Java package name containing the Jersey resource. * @param pathSpec The path spec for the servlet */ - public void addJerseyResourcePackage(final String packageName, - final String pathSpec) { - LOG.info("addJerseyResourcePackage: packageName=" + packageName - + ", pathSpec=" + pathSpec); + public void addJerseyResourcePackage(final String packageName, final String pathSpec) { + LOG.info("addJerseyResourcePackage: packageName=" + packageName + ", pathSpec=" + pathSpec); ResourceConfig application = new ResourceConfig().packages(packageName); final ServletHolder sh = new ServletHolder(new ServletContainer(application)); @@ -828,8 +807,8 @@ public void addJerseyResourcePackage(final String packageName, /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user - * can interact with the servlet added by this method. + * {@link #addPrivilegedServlet(String, String, Class)} in that any authenticated user can + * interact with the servlet added by this method. * @param name The name of the servlet (can be passed as null) * @param pathSpec The path spec for the servlet * @param clazz The servlet class @@ -841,8 +820,8 @@ public void addUnprivilegedServlet(String name, String pathSpec, /** * Adds a servlet in the server that any user can access. This method differs from - * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user - * can interact with the servlet added by this method. + * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user can + * interact with the servlet added by this method. * @param pathSpec The path spec for the servlet * @param holder The servlet holder */ @@ -862,9 +841,8 @@ public void addPrivilegedServlet(String name, String pathSpec, /** * Adds a servlet in the server that only administrators can access. This method differs from - * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those - * authenticated user who are identified as administrators can interact with the servlet added by - * this method. + * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those authenticated user + * who are identified as administrators can interact with the servlet added by this method. */ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { addServletWithAuth(pathSpec, holder, true); @@ -875,8 +853,8 @@ public void addPrivilegedServlet(String pathSpec, ServletHolder holder) { * directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or * {@link #addPrivilegedServlet(String, String, Class)}. 
*/ - void addServletWithAuth(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addServletWithAuth(String name, String pathSpec, Class clazz, + boolean requireAuthz) { addInternalServlet(name, pathSpec, clazz, requireAuthz); addFilterPathMapping(pathSpec, webAppContext); } @@ -892,20 +870,17 @@ void addServletWithAuth(String pathSpec, ServletHolder holder, boolean requireAu } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. * @param name The name of the {@link Servlet} (can be passed as null) * @param pathSpec The path spec for the {@link Servlet} * @param clazz The {@link Servlet} class * @param requireAuthz Require Kerberos authenticate to access servlet */ - void addInternalServlet(String name, String pathSpec, - Class clazz, boolean requireAuthz) { + void addInternalServlet(String name, String pathSpec, Class clazz, + boolean requireAuthz) { ServletHolder holder = new ServletHolder(clazz); if (name != null) { holder.setName(name); @@ -914,13 +889,10 @@ void addInternalServlet(String name, String pathSpec, } /** - * Add an internal servlet in the server, specifying whether or not to - * protect with Kerberos authentication. - * Note: This method is to be used for adding servlets that facilitate - * internal communication and not for user facing functionality. For - * servlets added using this method, filters (except internal Kerberos - * filters) are not enabled. - * + * Add an internal servlet in the server, specifying whether or not to protect with Kerberos + * authentication. Note: This method is to be used for adding servlets that facilitate internal + * communication and not for user facing functionality. For servlets added using this method, + * filters (except internal Kerberos filters) are not enabled. 
* @param pathSpec The path spec for the {@link Servlet} * @param holder The object providing the {@link Servlet} instance * @param requireAuthz Require Kerberos authenticate to access servlet @@ -944,15 +916,15 @@ void addInternalServlet(String pathSpec, ServletHolder holder, boolean requireAu public void addFilter(String name, String classname, Map parameters) { final String[] USER_FACING_URLS = { "*.html", "*.jsp" }; defineFilter(webAppContext, name, classname, parameters, USER_FACING_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + webAppContext.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + webAppContext.getDisplayName()); final String[] ALL_URLS = { "/*" }; for (Map.Entry e : defaultContexts.entrySet()) { if (e.getValue()) { ServletContextHandler handler = e.getKey(); defineFilter(handler, name, classname, parameters, ALL_URLS); - LOG.info("Added filter " + name + " (class=" + classname - + ") to context " + handler.getDisplayName()); + LOG.info("Added filter " + name + " (class=" + classname + ") to context " + + handler.getDisplayName()); } } filterNames.add(name); @@ -971,8 +943,8 @@ public void addGlobalFilter(String name, String classname, Map p /** * Define a filter for a context and set up default url mappings. */ - public static void defineFilter(ServletContextHandler handler, String name, - String classname, Map parameters, String[] urls) { + public static void defineFilter(ServletContextHandler handler, String name, String classname, + Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); holder.setName(name); holder.setClassName(classname); @@ -991,9 +963,8 @@ public static void defineFilter(ServletContextHandler handler, String name, * @param pathSpec The path spec * @param webAppCtx The WebApplicationContext to add to */ - protected void addFilterPathMapping(String pathSpec, - WebAppContext webAppCtx) { - for(String name : filterNames) { + protected void addFilterPathMapping(String pathSpec, WebAppContext webAppCtx) { + for (String name : filterNames) { FilterMapping fmap = new FilterMapping(); fmap.setPathSpec(pathSpec); fmap.setFilterName(name); @@ -1011,7 +982,7 @@ public Object getAttribute(String name) { return webAppContext.getAttribute(name); } - public WebAppContext getWebAppContext(){ + public WebAppContext getWebAppContext() { return this.webAppContext; } @@ -1029,8 +1000,7 @@ protected String getWebAppsPath(String webapps, String appName) throws FileNotFo URL url = getClass().getClassLoader().getResource(webapps + "/" + appName); if (url == null) { - throw new FileNotFoundException(webapps + "/" + appName - + " not found in CLASSPATH"); + throw new FileNotFoundException(webapps + "/" + appName + " not found in CLASSPATH"); } String urlString = url.toString(); @@ -1044,14 +1014,13 @@ protected String getWebAppsPath(String webapps, String appName) throws FileNotFo */ @Deprecated public int getPort() { - return ((ServerConnector)webServer.getConnectors()[0]).getLocalPort(); + return ((ServerConnector) webServer.getConnectors()[0]).getLocalPort(); } /** * Get the address that corresponds to a particular connector. - * - * @return the corresponding address for the connector, or null if there's no - * such connector or the connector is not bounded. + * @return the corresponding address for the connector, or null if there's no such connector or + * the connector is not bounded. 
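Note: a usage sketch (the helper name is invented) of reading the bound address after start(), matching the getConnectorAddress contract described above.

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.hbase.http.HttpServer;

    public final class ConnectorAddressSketch {
      static void printBoundAddress(HttpServer httpServer) throws IOException {
        httpServer.start();
        // Index 0 corresponds to the first endpoint added to the server; a null return
        // means there is no such connector or it has not been bound yet.
        InetSocketAddress addr = httpServer.getConnectorAddress(0);
        if (addr != null) {
          System.out.println("listening at " + addr.getHostString() + ":" + addr.getPort());
        }
      }
    }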
*/ public InetSocketAddress getConnectorAddress(int index) { Preconditions.checkArgument(index >= 0); @@ -1060,7 +1029,7 @@ public InetSocketAddress getConnectorAddress(int index) { return null; } - ServerConnector c = (ServerConnector)webServer.getConnectors()[index]; + ServerConnector c = (ServerConnector) webServer.getConnectors()[index]; if (c.getLocalPort() == -1 || c.getLocalPort() == -2) { // -1 if the connector has not been opened // -2 if it has been closed @@ -1079,14 +1048,14 @@ public void setThreads(int min, int max) { pool.setMaxThreads(max); } - private void initSpnego(Configuration conf, String hostName, - String usernameConfKey, String keytabConfKey, String kerberosNameRuleKey, - String signatureSecretKeyFileKey) throws IOException { + private void initSpnego(Configuration conf, String hostName, String usernameConfKey, + String keytabConfKey, String kerberosNameRuleKey, String signatureSecretKeyFileKey) + throws IOException { Map params = new HashMap<>(); String principalInConf = getOrEmptyString(conf, usernameConfKey); if (!principalInConf.isEmpty()) { - params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, SecurityUtil.getServerPrincipal( - principalInConf, hostName)); + params.put(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX, + SecurityUtil.getServerPrincipal(principalInConf, hostName)); } String httpKeytab = getOrEmptyString(conf, keytabConfKey); if (!httpKeytab.isEmpty()) { @@ -1098,30 +1067,30 @@ private void initSpnego(Configuration conf, String hostName, } String signatureSecretKeyFile = getOrEmptyString(conf, signatureSecretKeyFileKey); if (!signatureSecretKeyFile.isEmpty()) { - params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, - signatureSecretKeyFile); + params.put(HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_SUFFIX, signatureSecretKeyFile); } params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); // Verify that the required options were provided - if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) || - isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) { - throw new IllegalArgumentException(usernameConfKey + " and " - + keytabConfKey + " are both required in the configuration " - + "to enable SPNEGO/Kerberos authentication for the Web UI"); + if (isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_SUFFIX)) + || isMissing(params.get(HTTP_SPNEGO_AUTHENTICATION_KEYTAB_SUFFIX))) { + throw new IllegalArgumentException( + usernameConfKey + " and " + keytabConfKey + " are both required in the configuration " + + "to enable SPNEGO/Kerberos authentication for the Web UI"); } if (conf.getBoolean(HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_KEY, - HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) { - //Copy/rename standard hadoop proxyuser settings to filter - for(Map.Entry proxyEntry : - conf.getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { - params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), - proxyEntry.getValue()); - } - addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), params); + HTTP_SPNEGO_AUTHENTICATION_PROXYUSER_ENABLE_DEFAULT)) { + // Copy/rename standard hadoop proxyuser settings to filter + for (Map.Entry proxyEntry : conf + .getPropsWithPrefix(ProxyUsers.CONF_HADOOP_PROXYUSER).entrySet()) { + params.put(ProxyUserAuthenticationFilter.PROXYUSER_PREFIX + proxyEntry.getKey(), + proxyEntry.getValue()); + } + addGlobalFilter(SPNEGO_PROXYUSER_FILTER, ProxyUserAuthenticationFilter.class.getName(), + params); } else { - 
addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); + addGlobalFilter(SPNEGO_FILTER, AuthenticationFilter.class.getName(), params); } } @@ -1136,8 +1105,7 @@ private boolean isMissing(String value) { } /** - * Extracts the value for the given key from the configuration of returns a string of - * zero length. + * Extracts the value for the given key from the configuration of returns a string of zero length. */ private String getOrEmptyString(Configuration conf, String key) { if (null == key) { @@ -1166,8 +1134,7 @@ public void start() throws IOException { Handler[] handlers = webServer.getHandlers(); for (int i = 0; i < handlers.length; i++) { if (handlers[i].isFailed()) { - throw new IOException( - "Problem in starting http server. Server handlers failed"); + throw new IOException("Problem in starting http server. Server handlers failed"); } } // Make sure there are no errors initializing the context. @@ -1176,14 +1143,13 @@ public void start() throws IOException { // Have to stop the webserver, or else its non-daemon threads // will hang forever. webServer.stop(); - throw new IOException("Unable to initialize WebAppContext", - unavailableException); + throw new IOException("Unable to initialize WebAppContext", unavailableException); } } catch (IOException e) { throw e; } catch (InterruptedException e) { - throw (IOException) new InterruptedIOException( - "Interrupted while starting HTTP server").initCause(e); + throw (IOException) new InterruptedIOException("Interrupted while starting HTTP server") + .initCause(e); } catch (Exception e) { throw new IOException("Problem starting http server", e); } @@ -1216,12 +1182,12 @@ void openListeners() throws Exception { LOG.info("Jetty bound to port " + listener.getLocalPort()); break; } catch (IOException ex) { - if(!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { + if (!(ex instanceof BindException) && !(ex.getCause() instanceof BindException)) { throw ex; } if (port == 0 || !findPort) { - BindException be = new BindException("Port in use: " - + listener.getHost() + ":" + listener.getPort()); + BindException be = + new BindException("Port in use: " + listener.getHost() + ":" + listener.getPort()); be.initCause(ex); throw be; } @@ -1246,9 +1212,7 @@ public void stop() throws Exception { try { li.listener.close(); } catch (Exception e) { - LOG.error( - "Error while stopping listener for webapp" - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping listener for webapp" + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } } @@ -1258,16 +1222,15 @@ public void stop() throws Exception { webAppContext.clearAttributes(); webAppContext.stop(); } catch (Exception e) { - LOG.error("Error while stopping web app context for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), + e); exception = addMultiException(exception, e); } try { webServer.stop(); } catch (Exception e) { - LOG.error("Error while stopping web server for webapp " - + webAppContext.getDisplayName(), e); + LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } @@ -1278,7 +1241,7 @@ public void stop() throws Exception { } private MultiException addMultiException(MultiException exception, Exception e) { - if(exception == null){ + if (exception == null) { exception = new MultiException(); } exception.add(e); @@ 
-1307,8 +1270,8 @@ public String toString() { return "Inactive HttpServer"; } else { StringBuilder sb = new StringBuilder("HttpServer (") - .append(isAlive() ? STATE_DESCRIPTION_ALIVE : - STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); + .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE) + .append("), listening at:"); for (ListenerInfo li : listeners) { ServerConnector l = li.listener; sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); @@ -1320,29 +1283,26 @@ public String toString() { /** * Checks the user has privileges to access to instrumentation servlets. *

    - * If hadoop.security.instrumentation.requires.admin is set to FALSE - * (default value) it always returns TRUE. - *

    - * If hadoop.security.instrumentation.requires.admin is set to TRUE - * it will check that if the current user is in the admin ACLS. If the user is - * in the admin ACLs it returns TRUE, otherwise it returns FALSE. + * If hadoop.security.instrumentation.requires.admin is set to FALSE (default value) + * it always returns TRUE. + *

    + *

    + * If hadoop.security.instrumentation.requires.admin is set to TRUE it will check + * that if the current user is in the admin ACLS. If the user is in the admin ACLs it returns + * TRUE, otherwise it returns FALSE. *

    - * * @param servletContext the servlet context. * @param request the servlet request. * @param response the servlet response. * @return TRUE/FALSE based on the logic decribed above. */ - public static boolean isInstrumentationAccessAllowed( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean isInstrumentationAccessAllowed(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); boolean access = true; - boolean adminAccess = conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - false); + boolean adminAccess = conf + .getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, false); if (adminAccess) { access = hasAdministratorAccess(servletContext, request, response); } @@ -1350,20 +1310,17 @@ public static boolean isInstrumentationAccessAllowed( } /** - * Does the user sending the HttpServletRequest has the administrator ACLs? If - * it isn't the case, response will be modified to send an error to the user. - * + * Does the user sending the HttpServletRequest has the administrator ACLs? If it isn't the case, + * response will be modified to send an error to the user. * @param servletContext the {@link ServletContext} to use * @param request the {@link HttpServletRequest} to check * @param response used to send the error response if user does not have admin access. * @return true if admin-authorized, false otherwise * @throws IOException if an unauthenticated or unauthorized user tries to access the page */ - public static boolean hasAdministratorAccess( - ServletContext servletContext, HttpServletRequest request, - HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + public static boolean hasAdministratorAccess(ServletContext servletContext, + HttpServletRequest request, HttpServletResponse response) throws IOException { + Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); AccessControlList acl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return hasAdministratorAccess(conf, acl, request, response); @@ -1372,22 +1329,20 @@ public static boolean hasAdministratorAccess( public static boolean hasAdministratorAccess(Configuration conf, AccessControlList acl, HttpServletRequest request, HttpServletResponse response) throws IOException { // If there is no authorization, anybody has administrator access. 
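Note: a small configuration sketch tying together the two switches described above (the helper is illustrative; only the constants referenced in this hunk are used).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public final class InstrumentationAccessConfigSketch {
      static Configuration adminOnlyInstrumentation() {
        Configuration conf = new Configuration();
        // hadoop.security.authorization: without this, hasAdministratorAccess() admits everyone.
        conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
        // hadoop.security.instrumentation.requires.admin: additionally gate instrumentation
        // servlets such as /stacks behind the admin ACL.
        conf.setBoolean(
          CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
        return conf;
      }
    }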
- if (!conf.getBoolean( - CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { + if (!conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { return true; } String remoteUser = request.getRemoteUser(); if (remoteUser == null) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, - "Unauthenticated users are not " + - "authorized to access this page."); + "Unauthenticated users are not " + "authorized to access this page."); return false; } if (acl != null && !userHasAdministratorAccess(acl, remoteUser)) { - response.sendError(HttpServletResponse.SC_FORBIDDEN, "User " - + remoteUser + " is unauthorized to access this page."); + response.sendError(HttpServletResponse.SC_FORBIDDEN, + "User " + remoteUser + " is unauthorized to access this page."); return false; } @@ -1395,46 +1350,39 @@ public static boolean hasAdministratorAccess(Configuration conf, AccessControlLi } /** - * Get the admin ACLs from the given ServletContext and check if the given - * user is in the ACL. - * + * Get the admin ACLs from the given ServletContext and check if the given user is in the ACL. * @param servletContext the context containing the admin ACL. * @param remoteUser the remote user to check for. - * @return true if the user is present in the ACL, false if no ACL is set or - * the user is not present + * @return true if the user is present in the ACL, false if no ACL is set or the user is not + * present */ public static boolean userHasAdministratorAccess(ServletContext servletContext, String remoteUser) { - AccessControlList adminsAcl = (AccessControlList) servletContext - .getAttribute(ADMINS_ACL); + AccessControlList adminsAcl = (AccessControlList) servletContext.getAttribute(ADMINS_ACL); return userHasAdministratorAccess(adminsAcl, remoteUser); } public static boolean userHasAdministratorAccess(AccessControlList acl, String remoteUser) { - UserGroupInformation remoteUserUGI = - UserGroupInformation.createRemoteUser(remoteUser); + UserGroupInformation remoteUserUGI = UserGroupInformation.createRemoteUser(remoteUser); return acl != null && acl.isUserAllowed(remoteUserUGI); } /** - * A very simple servlet to serve up a text representation of the current - * stack traces. It both returns the stacks to the caller and logs them. - * Currently the stack traces are done sequentially rather than exactly the - * same data. + * A very simple servlet to serve up a text representation of the current stack traces. It both + * returns the stacks to the caller and logs them. Currently the stack traces are done + * sequentially rather than exactly the same data. 
*/ public static class StackServlet extends HttpServlet { private static final long serialVersionUID = -6284183679759467039L; @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + throws ServletException, IOException { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } response.setContentType("text/plain; charset=UTF-8"); - try (PrintStream out = new PrintStream( - response.getOutputStream(), false, "UTF-8")) { + try (PrintStream out = new PrintStream(response.getOutputStream(), false, "UTF-8")) { Threads.printThreadInfo(out, ""); out.flush(); } @@ -1443,9 +1391,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) } /** - * A Servlet input filter that quotes all HTML active characters in the - * parameter names and values. The goal is to quote the characters to make - * all of the servlets resistant to cross-site scripting attacks. + * A Servlet input filter that quotes all HTML active characters in the parameter names and + * values. The goal is to quote the characters to make all of the servlets resistant to cross-site + * scripting attacks. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public static class QuotingInputFilter implements Filter { @@ -1453,6 +1401,7 @@ public static class QuotingInputFilter implements Filter { public static class RequestQuoter extends HttpServletRequestWrapper { private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { super(rawRequest); this.rawRequest = rawRequest; @@ -1464,8 +1413,8 @@ public RequestQuoter(HttpServletRequest rawRequest) { @Override public Enumeration getParameterNames() { return new Enumeration() { - private Enumeration rawIterator = - rawRequest.getParameterNames(); + private Enumeration rawIterator = rawRequest.getParameterNames(); + @Override public boolean hasMoreElements() { return rawIterator.hasMoreElements(); @@ -1483,8 +1432,8 @@ public String nextElement() { */ @Override public String getParameter(String name) { - return HtmlQuoting.quoteHtmlChars(rawRequest.getParameter( - HtmlQuoting.unquoteHtmlChars(name))); + return HtmlQuoting + .quoteHtmlChars(rawRequest.getParameter(HtmlQuoting.unquoteHtmlChars(name))); } @Override @@ -1495,7 +1444,7 @@ public String[] getParameterValues(String name) { return null; } String[] result = new String[unquoteValue.length]; - for(int i=0; i < result.length; ++i) { + for (int i = 0; i < result.length; ++i) { result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]); } return result; @@ -1505,10 +1454,10 @@ public String[] getParameterValues(String name) { public Map getParameterMap() { Map result = new HashMap<>(); Map raw = rawRequest.getParameterMap(); - for (Map.Entry item: raw.entrySet()) { + for (Map.Entry item : raw.entrySet()) { String[] rawValue = item.getValue(); String[] cookedValue = new String[rawValue.length]; - for(int i=0; i< rawValue.length; ++i) { + for (int i = 0; i < rawValue.length; ++i) { cookedValue[i] = HtmlQuoting.quoteHtmlChars(rawValue[i]); } result.put(HtmlQuoting.quoteHtmlChars(item.getKey()), cookedValue); @@ -1517,18 +1466,16 @@ public Map getParameterMap() { } /** - * Quote the url so that users specifying the HOST HTTP header - * can't inject attacks. + * Quote the url so that users specifying the HOST HTTP header can't inject attacks. 
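Note: a minimal sketch of the transformation the QuotingInputFilter applies to parameter names, values and the request URL. HtmlQuoting is an internal (Private audience) helper in the same package; the exact escaped output shown in the comment is indicative only.

    import org.apache.hadoop.hbase.http.HtmlQuoting;

    public final class HtmlQuotingSketch {
      public static void main(String[] args) {
        // The filter runs every parameter through quoteHtmlChars so HTML-active
        // characters cannot be reflected verbatim into a rendered page.
        String raw = "<script>alert('x')</script>";
        String quoted = HtmlQuoting.quoteHtmlChars(raw); // e.g. &lt;script&gt;... (entity-escaped)
        String roundTrip = HtmlQuoting.unquoteHtmlChars(quoted);
        System.out.println(quoted);
        System.out.println(raw.equals(roundTrip)); // expected: true
      }
    }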
*/ @Override - public StringBuffer getRequestURL(){ + public StringBuffer getRequestURL() { String url = rawRequest.getRequestURL().toString(); return new StringBuffer(HtmlQuoting.quoteHtmlChars(url)); } /** - * Quote the server name so that users specifying the HOST HTTP header - * can't inject attacks. + * Quote the server name so that users specifying the HOST HTTP header can't inject attacks. */ @Override public String getServerName() { @@ -1546,12 +1493,9 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, - ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { - HttpServletRequestWrapper quoted = - new RequestQuoter((HttpServletRequest) request); + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { + HttpServletRequestWrapper quoted = new RequestQuoter((HttpServletRequest) request); HttpServletResponse httpResponse = (HttpServletResponse) response; String mime = inferMimeType(request); @@ -1570,11 +1514,11 @@ public void doFilter(ServletRequest request, } /** - * Infer the mime type for the response based on the extension of the request - * URI. Returns null if unknown. + * Infer the mime type for the response based on the extension of the request URI. Returns null + * if unknown. */ private String inferMimeType(ServletRequest request) { - String path = ((HttpServletRequest)request).getRequestURI(); + String path = ((HttpServletRequest) request).getRequestURI(); ServletContext context = config.getServletContext(); return context.getMimeType(path); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java index 94269719aa42..8ff9d9691924 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/HttpServerUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,5 +59,6 @@ public static void constrainHttpMethods(ServletContextHandler ctxHandler, ctxHandler.setSecurityHandler(securityHandler); } - private HttpServerUtil() {} + private HttpServerUtil() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java index 8b13e2b22053..06949929baad 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/InfoServer.java @@ -27,16 +27,15 @@ import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort; import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder; /** - * Create a Jetty embedded server to answer http requests. The primary goal - * is to serve up status information for the server. - * There are three contexts: - * "/stacks/" -> points to stack trace - * "/static/" -> points to common static files (src/hbase-webapps/static) - * "/" -> the jsp server code from (src/hbase-webapps/<name>) + * Create a Jetty embedded server to answer http requests. 
The primary goal is to serve up status + * information for the server. There are three contexts: "/stacks/" -> points to stack trace + * "/static/" -> points to common static files (src/hbase-webapps/static) "/" -> the jsp + * server code from (src/hbase-webapps/<name>) */ @InterfaceAudience.Private public class InfoServer { @@ -44,48 +43,47 @@ public class InfoServer { private final org.apache.hadoop.hbase.http.HttpServer httpServer; /** - * Create a status server on the given port. - * The jsp scripts are taken from src/hbase-webapps/name. + * Create a status server on the given port. The jsp scripts are taken from + * src/hbase-webapps/name. * @param name The name of the server * @param bindAddress address to bind to * @param port The port to use on the server * @param findPort whether the server should start at the given port and increment by 1 until it - * finds a free port. + * finds a free port. * @param c the {@link Configuration} to build the server * @throws IOException if getting one of the password fails or the server cannot be created */ public InfoServer(String name, String bindAddress, int port, boolean findPort, final Configuration c) throws IOException { HttpConfig httpConfig = new HttpConfig(c); - HttpServer.Builder builder = - new org.apache.hadoop.hbase.http.HttpServer.Builder(); + HttpServer.Builder builder = new org.apache.hadoop.hbase.http.HttpServer.Builder(); - builder.setName(name).addEndpoint(URI.create(httpConfig.getSchemePrefix() + - HostAndPort.fromParts(bindAddress,port).toString())). - setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); + builder.setName(name) + .addEndpoint(URI.create( + httpConfig.getSchemePrefix() + HostAndPort.fromParts(bindAddress, port).toString())) + .setAppDir(HBASE_APP_DIR).setFindPort(findPort).setConf(c); String logDir = System.getProperty("hbase.log.dir"); if (logDir != null) { builder.setLogDir(logDir); } if (httpConfig.isSecure()) { - builder.keyPassword(HBaseConfiguration - .getPassword(c, "ssl.server.keystore.keypassword", null)) - .keyStore(c.get("ssl.server.keystore.location"), - HBaseConfiguration.getPassword(c,"ssl.server.keystore.password", null), - c.get("ssl.server.keystore.type", "jks")) - .trustStore(c.get("ssl.server.truststore.location"), - HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), - c.get("ssl.server.truststore.type", "jks")); + builder + .keyPassword(HBaseConfiguration.getPassword(c, "ssl.server.keystore.keypassword", null)) + .keyStore(c.get("ssl.server.keystore.location"), + HBaseConfiguration.getPassword(c, "ssl.server.keystore.password", null), + c.get("ssl.server.keystore.type", "jks")) + .trustStore(c.get("ssl.server.truststore.location"), + HBaseConfiguration.getPassword(c, "ssl.server.truststore.password", null), + c.get("ssl.server.truststore.type", "jks")); builder.excludeCiphers(c.get("ssl.server.exclude.cipher.list")); } // Enable SPNEGO authentication if ("kerberos".equalsIgnoreCase(c.get(HttpServer.HTTP_UI_AUTHENTICATION, null))) { builder.setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) - .setSignatureSecretFileKey( - HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) - .setSecurityEnabled(true); + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) + .setKerberosNameRulesKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KRB_NAME_KEY) + 
.setSignatureSecretFileKey(HttpServer.HTTP_AUTHENTICATION_SIGNATURE_SECRET_FILE_KEY) + .setSecurityEnabled(true); // Set an admin ACL on sensitive webUI endpoints AccessControlList acl = buildAdminAcl(c); @@ -95,13 +93,13 @@ public InfoServer(String name, String bindAddress, int port, boolean findPort, } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. return new AccessControlList("*", null); @@ -111,17 +109,14 @@ AccessControlList buildAdminAcl(Configuration conf) { /** * Explicitly invoke {@link #addPrivilegedServlet(String, String, Class)} or - * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. - * This method will add a servlet which any authenticated user can access. - * + * {@link #addUnprivilegedServlet(String, String, Class)} instead of this method. This method will + * add a servlet which any authenticated user can access. * @deprecated Use {@link #addUnprivilegedServlet(String, String, Class)} or - * {@link #addPrivilegedServlet(String, String, Class)} instead of this - * method which does not state outwardly what kind of authz rules will - * be applied to this servlet. + * {@link #addPrivilegedServlet(String, String, Class)} instead of this method which + * does not state outwardly what kind of authz rules will be applied to this servlet. */ @Deprecated - public void addServlet(String name, String pathSpec, - Class clazz) { + public void addServlet(String name, String pathSpec, Class clazz) { addUnprivilegedServlet(name, pathSpec, clazz); } @@ -130,7 +125,7 @@ public void addServlet(String name, String pathSpec, * @see HttpServer#addUnprivilegedServlet(String, String, Class) */ public void addUnprivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addUnprivilegedServlet(name, pathSpec, clazz); } @@ -150,7 +145,7 @@ public void addUnprivilegedServlet(String name, String pathSpec, ServletHolder h * @see HttpServer#addPrivilegedServlet(String, String, Class) */ public void addPrivilegedServlet(String name, String pathSpec, - Class clazz) { + Class clazz) { this.httpServer.addPrivilegedServlet(name, pathSpec, clazz); } @@ -175,21 +170,20 @@ public void stop() throws Exception { this.httpServer.stop(); } - /** * Returns true if and only if UI authentication (spnego) is enabled, UI authorization is enabled, * and the requesting user is defined as an administrator. If the UI is set to readonly, this * method always returns false. 
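Note: a hedged sketch of bringing up an InfoServer with a SPNEGO-authenticated UI, using only the constructor and configuration keys that appear in this patch. The name, bind address and port are illustrative, and start() is assumed (only stop() appears in this hunk).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;
    import org.apache.hadoop.hbase.http.HttpServer;
    import org.apache.hadoop.hbase.http.InfoServer;

    public final class SecureInfoServerSketch {
      static InfoServer startSecuredUi() throws IOException {
        Configuration conf = new Configuration();
        // Enable SPNEGO for the web UI plus Hadoop-style authorization; with both set,
        // canUserModifyUI() consults the admin ACL built by buildAdminAcl().
        conf.set(HttpServer.HTTP_UI_AUTHENTICATION, "kerberos");
        conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
        // A real deployment must also set HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY
        // and HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY, omitted here.
        InfoServer info = new InfoServer("master", "0.0.0.0", 16010, true, conf);
        info.start();
        return info;
      }
    }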
*/ - public static boolean canUserModifyUI( - HttpServletRequest req, ServletContext ctx, Configuration conf) { + public static boolean canUserModifyUI(HttpServletRequest req, ServletContext ctx, + Configuration conf) { if (conf.getBoolean("hbase.master.ui.readonly", false)) { return false; } String remoteUser = req.getRemoteUser(); - if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) && - conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) && - remoteUser != null) { + if ("kerberos".equalsIgnoreCase(conf.get(HttpServer.HTTP_UI_AUTHENTICATION)) + && conf.getBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false) + && remoteUser != null) { return HttpServer.userHasAdministratorAccess(ctx, remoteUser); } return false; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java index cd49f7e16baf..0a9beb2ec241 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/NoCacheFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.http; import java.io.IOException; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -26,7 +25,6 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; @@ -38,9 +36,8 @@ public void init(FilterConfig filterConfig) throws ServletException { } @Override - public void doFilter(ServletRequest req, ServletResponse res, - FilterChain chain) - throws IOException, ServletException { + public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) + throws IOException, ServletException { HttpServletResponse httpRes = (HttpServletResponse) res; httpRes.setHeader("Cache-Control", "no-cache"); long now = EnvironmentEdgeManager.currentTime(); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java index d77ea9b14cec..3e84aeed0390 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileOutputServlet.java @@ -42,14 +42,14 @@ public class ProfileOutputServlet extends DefaultServlet { @Override protected void doGet(final HttpServletRequest req, final HttpServletResponse resp) - throws ServletException, IOException { + throws ServletException, IOException { String absoluteDiskPath = getServletContext().getRealPath(req.getPathInfo()); File requestedFile = new File(absoluteDiskPath); // async-profiler version 1.4 writes 'Started [cpu] profiling' to output file when profiler is // running which gets replaced by final output. If final output is not ready yet, the file size // will be <100 bytes (in all modes). if (requestedFile.length() < 100) { - LOG.info(requestedFile + " is incomplete. Sending auto-refresh header."); + LOG.info(requestedFile + " is incomplete. 
Sending auto-refresh header."); String refreshUrl = req.getRequestURI(); // Rebuild the query string (if we have one) if (req.getQueryString() != null) { @@ -57,8 +57,8 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } ProfileServlet.setResponseHeader(resp); resp.setHeader("Refresh", REFRESH_PERIOD + ";" + refreshUrl); - resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + - " seconds until the output file is ready. Redirecting to " + refreshUrl); + resp.getWriter().write("This page will be auto-refreshed every " + REFRESH_PERIOD + + " seconds until the output file is ready. Redirecting to " + refreshUrl); } else { super.doGet(req, resp); } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java index 3ba59f4b66ce..1601373c57f8 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProfileServlet.java @@ -25,63 +25,35 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; - import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.util.ProcessUtils; import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.base.Joiner; /** - * Servlet that runs async-profiler as web-endpoint. - * Following options from async-profiler can be specified as query paramater. - * // -e event profiling event: cpu|alloc|lock|cache-misses etc. - * // -d duration run profiling for 'duration' seconds (integer) - * // -i interval sampling interval in nanoseconds (long) - * // -j jstackdepth maximum Java stack depth (integer) - * // -b bufsize frame buffer size (long) - * // -t profile different threads separately - * // -s simple class names instead of FQN - * // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html - * // --width px SVG width pixels (integer) - * // --height px SVG frame height pixels (integer) - * // --minwidth px skip frames smaller than px (double) - * // --reverse generate stack-reversed FlameGraph / Call tree - * Example: - * - To collect 30 second CPU profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof" - * - To collect 1 minute CPU profile of current process and output in tree format (html) - * curl "http://localhost:10002/prof?output=tree&duration=60" - * - To collect 30 second heap allocation profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=alloc" - * - To collect lock contention profile of current process (returns FlameGraph svg) - * curl "http://localhost:10002/prof?event=lock" - * Following event types are supported (default is 'cpu') (NOTE: not all OS'es support all events) - * // Perf events: - * // cpu - * // page-faults - * // context-switches - * // cycles - * // instructions - * // cache-references - * // cache-misses - * // branches - * // branch-misses - * // bus-cycles - * // L1-dcache-load-misses - * // LLC-load-misses - * // dTLB-load-misses - * // mem:breakpoint - * // trace:tracepoint - * // Java events: - * // alloc - * // lock + * Servlet that runs async-profiler as web-endpoint. 
Following options from async-profiler can be + * specified as query paramater. // -e event profiling event: cpu|alloc|lock|cache-misses etc. // -d + * duration run profiling for 'duration' seconds (integer) // -i interval sampling interval in + * nanoseconds (long) // -j jstackdepth maximum Java stack depth (integer) // -b bufsize frame + * buffer size (long) // -t profile different threads separately // -s simple class names instead of + * FQN // -o fmt[,fmt...] output format: summary|traces|flat|collapsed|svg|tree|jfr|html // --width + * px SVG width pixels (integer) // --height px SVG frame height pixels (integer) // --minwidth px + * skip frames smaller than px (double) // --reverse generate stack-reversed FlameGraph / Call tree + * Example: - To collect 30 second CPU profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof" - To collect 1 minute CPU profile of current process and output in + * tree format (html) curl "http://localhost:10002/prof?output=tree&duration=60" - To collect 30 + * second heap allocation profile of current process (returns FlameGraph svg) curl + * "http://localhost:10002/prof?event=alloc" - To collect lock contention profile of current process + * (returns FlameGraph svg) curl "http://localhost:10002/prof?event=lock" Following event types are + * supported (default is 'cpu') (NOTE: not all OS'es support all events) // Perf events: // cpu // + * page-faults // context-switches // cycles // instructions // cache-references // cache-misses // + * branches // branch-misses // bus-cycles // L1-dcache-load-misses // LLC-load-misses // + * dTLB-load-misses // mem:breakpoint // trace:tracepoint // Java events: // alloc // lock */ @InterfaceAudience.Private public class ProfileServlet extends HttpServlet { @@ -101,22 +73,12 @@ public class ProfileServlet extends HttpServlet { static final String OUTPUT_DIR = System.getProperty("java.io.tmpdir") + "/prof-output-hbase"; enum Event { - CPU("cpu"), - ALLOC("alloc"), - LOCK("lock"), - PAGE_FAULTS("page-faults"), - CONTEXT_SWITCHES("context-switches"), - CYCLES("cycles"), - INSTRUCTIONS("instructions"), - CACHE_REFERENCES("cache-references"), - CACHE_MISSES("cache-misses"), - BRANCHES("branches"), - BRANCH_MISSES("branch-misses"), - BUS_CYCLES("bus-cycles"), - L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), - LLC_LOAD_MISSES("LLC-load-misses"), - DTLB_LOAD_MISSES("dTLB-load-misses"), - MEM_BREAKPOINT("mem:breakpoint"), + CPU("cpu"), ALLOC("alloc"), LOCK("lock"), PAGE_FAULTS("page-faults"), + CONTEXT_SWITCHES("context-switches"), CYCLES("cycles"), INSTRUCTIONS("instructions"), + CACHE_REFERENCES("cache-references"), CACHE_MISSES("cache-misses"), BRANCHES("branches"), + BRANCH_MISSES("branch-misses"), BUS_CYCLES("bus-cycles"), + L1_DCACHE_LOAD_MISSES("L1-dcache-load-misses"), LLC_LOAD_MISSES("LLC-load-misses"), + DTLB_LOAD_MISSES("dTLB-load-misses"), MEM_BREAKPOINT("mem:breakpoint"), TRACE_TRACEPOINT("trace:tracepoint"),; private final String internalName; @@ -141,20 +103,15 @@ public static Event fromInternalName(final String name) { } enum Output { - SUMMARY, - TRACES, - FLAT, - COLLAPSED, + SUMMARY, TRACES, FLAT, COLLAPSED, // No SVG in 2.x asyncprofiler. - SVG, - TREE, - JFR, + SVG, TREE, JFR, // In 2.x asyncprofiler, this is how you get flamegraphs. 
HTML } @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SE_TRANSIENT_FIELD_NOT_RESTORED", - justification = "This class is never serialized nor restored.") + justification = "This class is never serialized nor restored.") private transient Lock profilerLock = new ReentrantLock(); private transient volatile Process process; private String asyncProfilerHome; @@ -180,10 +137,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res if (asyncProfilerHome == null || asyncProfilerHome.trim().isEmpty()) { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + - "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter().write("ASYNC_PROFILER_HOME env is not set.\n\n" + + "Please ensure the prerequsites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } @@ -217,9 +174,9 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res int lockTimeoutSecs = 3; if (profilerLock.tryLock(lockTimeoutSecs, TimeUnit.SECONDS)) { try { - File outputFile = new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + - event.name().toLowerCase() + "-" + ID_GEN.incrementAndGet() + "." + - output.name().toLowerCase()); + File outputFile = + new File(OUTPUT_DIR, "async-prof-pid-" + pid + "-" + event.name().toLowerCase() + + "-" + ID_GEN.incrementAndGet() + "." + output.name().toLowerCase()); List cmd = new ArrayList<>(); cmd.add(asyncProfilerHome + PROFILER_SCRIPT); cmd.add("-e"); @@ -270,11 +227,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_ACCEPTED); String relativeUrl = "/prof-output-hbase/" + outputFile.getName(); - resp.getWriter().write( - "Started [" + event.getInternalName() + - "] profiling. This page will automatically redirect to " + - relativeUrl + " after " + duration + " seconds.\n\nCommand:\n" + - Joiner.on(" ").join(cmd)); + resp.getWriter() + .write("Started [" + event.getInternalName() + + "] profiling. This page will automatically redirect to " + relativeUrl + + " after " + duration + " seconds.\n\nCommand:\n" + Joiner.on(" ").join(cmd)); // to avoid auto-refresh by ProfileOutputServlet, refreshDelay can be specified // via url param @@ -290,10 +246,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res } else { setResponseHeader(resp); resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); - resp.getWriter().write( - "Unable to acquire lock. Another instance of profiler might be running."); - LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + - " seconds. Another instance of profiler might be running."); + resp.getWriter() + .write("Unable to acquire lock. Another instance of profiler might be running."); + LOG.warn("Unable to acquire lock in " + lockTimeoutSecs + + " seconds. 
Another instance of profiler might be running."); } } catch (InterruptedException e) { LOG.warn("Interrupted while acquiring profile lock.", e); @@ -389,10 +345,10 @@ protected void doGet(final HttpServletRequest req, final HttpServletResponse res throws IOException { resp.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); setResponseHeader(resp); - resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + - "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + - "environment is properly configured. For more information please see\n" + - "http://hbase.apache.org/book.html#profiler\n"); + resp.getWriter().write("The profiler servlet was disabled at startup.\n\n" + + "Please ensure the prerequisites for the Profiler Servlet have been installed and the\n" + + "environment is properly configured. For more information please see\n" + + "http://hbase.apache.org/book.html#profiler\n"); return; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java index 182a4e10996d..a8a561c97dea 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ProxyUserAuthenticationFilter.java @@ -17,18 +17,6 @@ */ package org.apache.hadoop.hbase.http; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.security.authorize.ProxyUsers; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.apache.hadoop.util.HttpExceptionUtils; -import org.apache.hadoop.util.StringUtils; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.security.Principal; import java.util.ArrayList; @@ -43,30 +31,32 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.apache.hadoop.security.authorize.AuthorizationException; +import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.hadoop.util.HttpExceptionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This file has been copied directly (changing only the package name and and the ASF license - * text format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase - * depends on doesn't have it yet - * (as of 2020 Apr 24, there is no Hadoop release that has it either). 
- * - * Hadoop version: - * unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 - * - * Haddop path: + * This file has been copied directly (changing only the package name and and the ASF license text + * format, and adding the Yetus annotations) from Hadoop, as the Hadoop version that HBase depends + * on doesn't have it yet (as of 2020 Apr 24, there is no Hadoop release that has it either). Hadoop + * version: unreleased, master branch commit 4ea6c2f457496461afc63f38ef4cef3ab0efce49 Haddop path: * hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authentication/ - * server/ProxyUserAuthenticationFilter.java - * - * AuthenticationFilter which adds support to perform operations - * using end user instead of proxy user. Fetches the end user from - * doAs Query Parameter. + * server/ProxyUserAuthenticationFilter.java AuthenticationFilter which adds support to perform + * operations using end user instead of proxy user. Fetches the end user from doAs Query Parameter. */ @InterfaceAudience.Private @InterfaceStability.Evolving public class ProxyUserAuthenticationFilter extends AuthenticationFilter { - private static final Logger LOG = LoggerFactory.getLogger( - ProxyUserAuthenticationFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(ProxyUserAuthenticationFilter.class); private static final String DO_AS = "doas"; public static final String PROXYUSER_PREFIX = "proxyuser"; @@ -85,14 +75,13 @@ protected void doFilter(FilterChain filterChain, HttpServletRequest request, String doAsUser = lowerCaseRequest.getParameter(DO_AS); if (doAsUser != null && !doAsUser.equals(request.getRemoteUser())) { - LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", - doAsUser, request.getRemoteUser(), request.getRemoteAddr()); - UserGroupInformation requestUgi = (request.getUserPrincipal() != null) ? - UserGroupInformation.createRemoteUser(request.getRemoteUser()) + LOG.debug("doAsUser = {}, RemoteUser = {} , RemoteAddress = {} ", doAsUser, + request.getRemoteUser(), request.getRemoteAddr()); + UserGroupInformation requestUgi = (request.getUserPrincipal() != null) + ? 
UserGroupInformation.createRemoteUser(request.getRemoteUser()) : null; if (requestUgi != null) { - requestUgi = UserGroupInformation.createProxyUser(doAsUser, - requestUgi); + requestUgi = UserGroupInformation.createProxyUser(doAsUser, requestUgi); try { ProxyUsers.authorize(requestUgi, request.getRemoteAddr()); @@ -116,7 +105,7 @@ public String getName() { LOG.debug("Proxy user Authentication successful"); } catch (AuthorizationException ex) { HttpExceptionUtils.createServletExceptionResponse(response, - HttpServletResponse.SC_FORBIDDEN, ex); + HttpServletResponse.SC_FORBIDDEN, ex); LOG.warn("Proxy user Authentication exception", ex); return; } @@ -140,8 +129,8 @@ protected Configuration getProxyuserConfiguration(FilterConfig filterConfig) } static boolean containsUpperCase(final Iterable strings) { - for(String s : strings) { - for(int i = 0; i < s.length(); i++) { + for (String s : strings) { + for (int i = 0; i < s.length(); i++) { if (Character.isUpperCase(s.charAt(i))) { return true; } @@ -151,17 +140,16 @@ static boolean containsUpperCase(final Iterable strings) { } /** - * The purpose of this function is to get the doAs parameter of a http request - * case insensitively + * The purpose of this function is to get the doAs parameter of a http request case insensitively * @param request * @return doAs parameter if exists or null otherwise */ - public static String getDoasFromHeader(final HttpServletRequest request) { + public static String getDoasFromHeader(final HttpServletRequest request) { String doas = null; final Enumeration headers = request.getHeaderNames(); - while (headers.hasMoreElements()){ + while (headers.hasMoreElements()) { String header = headers.nextElement(); - if (header.toLowerCase().equals("doas")){ + if (header.toLowerCase().equals("doas")) { doas = request.getHeader(header); break; } @@ -169,11 +157,9 @@ public static String getDoasFromHeader(final HttpServletRequest request) { return doas; } - public static HttpServletRequest toLowerCase( - final HttpServletRequest request) { + public static HttpServletRequest toLowerCase(final HttpServletRequest request) { @SuppressWarnings("unchecked") - final Map original = (Map) - request.getParameterMap(); + final Map original = (Map) request.getParameterMap(); if (!containsUpperCase(original.keySet())) { return request; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java index f00f2a195af0..77fca8421710 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/SecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,16 +6,15 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *
    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.IOException; @@ -37,10 +36,10 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class SecurityHeadersFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(SecurityHeadersFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(SecurityHeadersFilter.class); private static final String DEFAULT_HSTS = "max-age=63072000;includeSubDomains;preload"; - private static final String DEFAULT_CSP = "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; + private static final String DEFAULT_CSP = + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"; private FilterConfig filterConfig; @Override diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java index 8f338a7af68a..9c99b0ab8dc7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/ServerConfigurationKeys.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceStability; /** - * This interface contains constants for configuration keys used - * in the hbase http server code. + * This interface contains constants for configuration keys used in the hbase http server code. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -34,11 +33,9 @@ public interface ServerConfigurationKeys { public static final boolean HBASE_SSL_ENABLED_DEFAULT = false; /** Enable/Disable aliases serving from jetty */ - public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = - "hbase.jetty.logs.serve.aliases"; + public static final String HBASE_JETTY_LOGS_SERVE_ALIASES = "hbase.jetty.logs.serve.aliases"; - public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = - true; + public static final boolean DEFAULT_HBASE_JETTY_LOGS_SERVE_ALIASES = true; public static final String HBASE_HTTP_STATIC_USER = "hbase.http.staticuser.user"; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java index 05ca9a3abd19..bb545e99f0ba 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/conf/ConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,22 +19,19 @@ import java.io.IOException; import java.io.Writer; - import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.http.HttpServer; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A servlet to print out the running configuration data. */ -@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceAudience.LimitedPrivate({ "HBase" }) @InterfaceStability.Unstable public class ConfServlet extends HttpServlet { private static final long serialVersionUID = 1L; @@ -44,12 +41,12 @@ public class ConfServlet extends HttpServlet { private static final String FORMAT_PARAM = "format"; /** - * Return the Configuration of the daemon hosting this servlet. - * This is populated when the HttpServer starts. + * Return the Configuration of the daemon hosting this servlet. This is populated when the + * HttpServer starts. */ private Configuration getConfFromContext() { - Configuration conf = (Configuration)getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; } @@ -57,8 +54,7 @@ private Configuration getConfFromContext() { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { - if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), - request, response)) { + if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(), request, response)) { return; } @@ -86,7 +82,7 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) * Guts of the servlet - extracted for easy testing. 
*/ static void writeResponse(Configuration conf, Writer out, String format) - throws IOException, BadFormatException { + throws IOException, BadFormatException { if (FORMAT_JSON.equals(format)) { Configuration.dumpConfiguration(conf, out); } else if (FORMAT_XML.equals(format)) { diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java index fdcd34783c04..f501e1648599 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/ByteArraySerializer.java @@ -20,6 +20,7 @@ import java.lang.reflect.Type; import org.apache.hadoop.hbase.util.Bytes; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.gson.JsonElement; import org.apache.hbase.thirdparty.com.google.gson.JsonPrimitive; import org.apache.hbase.thirdparty.com.google.gson.JsonSerializationContext; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java index c75113ded730..3379c6e25562 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/gson/GsonMessageBodyWriter.java @@ -32,6 +32,7 @@ import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + import org.apache.hbase.thirdparty.com.google.gson.Gson; import org.apache.hbase.thirdparty.javax.ws.rs.Produces; import org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException; @@ -56,20 +57,14 @@ public GsonMessageBodyWriter(Gson gson) { @Override public boolean isWriteable(Class type, Type genericType, Annotation[] annotations, - MediaType mediaType) { + MediaType mediaType) { return mediaType == null || MediaType.APPLICATION_JSON_TYPE.isCompatible(mediaType); } @Override - public void writeTo( - T t, - Class type, - Type genericType, - Annotation[] annotations, - MediaType mediaType, - MultivaluedMap httpHeaders, - OutputStream entityStream - ) throws IOException, WebApplicationException { + public void writeTo(T t, Class type, Type genericType, Annotation[] annotations, + MediaType mediaType, MultivaluedMap httpHeaders, OutputStream entityStream) + throws IOException, WebApplicationException { final Charset outputCharset = requestedCharset(mediaType); try (Writer writer = new OutputStreamWriter(entityStream, outputCharset)) { gson.toJson(t, writer); @@ -77,23 +72,20 @@ public void writeTo( } private static Charset requestedCharset(MediaType mediaType) { - return Optional.ofNullable(mediaType) - .map(MediaType::getParameters) - .map(params -> params.get("charset")) - .map(c -> { - try { - return Charset.forName(c); - } catch (IllegalCharsetNameException e) { - logger.debug("Client requested illegal Charset '{}'", c); - return null; - } catch (UnsupportedCharsetException e) { - logger.debug("Client requested unsupported Charset '{}'", c); - return null; - } catch (Exception e) { - logger.debug("Error while resolving Charset '{}'", c, e); - return null; - } - }) - .orElse(StandardCharsets.UTF_8); + return Optional.ofNullable(mediaType).map(MediaType::getParameters) + .map(params -> params.get("charset")).map(c -> { + try { + return Charset.forName(c); + } catch (IllegalCharsetNameException e) { + logger.debug("Client requested illegal 
Charset '{}'", c); + return null; + } catch (UnsupportedCharsetException e) { + logger.debug("Client requested unsupported Charset '{}'", c); + return null; + } catch (Exception e) { + logger.debug("Error while resolving Charset '{}'", c, e); + return null; + } + }).orElse(StandardCharsets.UTF_8); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java index dc3f8a7bf430..d59ec3846da7 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/ResponseEntityMapper.java @@ -19,6 +19,7 @@ import java.io.IOException; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerRequestContext; import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerResponseContext; @@ -34,10 +35,8 @@ public class ResponseEntityMapper implements ContainerResponseFilter { @Override - public void filter( - ContainerRequestContext requestContext, - ContainerResponseContext responseContext - ) throws IOException { + public void filter(ContainerRequestContext requestContext, + ContainerResponseContext responseContext) throws IOException { /* * Follows very loosely the top-level document specification described in by JSON API. Only * handles 200 response codes; leaves room for errors and other response types. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java index 57a7e930905b..0c7b869fece5 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jersey/SupplierFactoryAdapter.java @@ -19,6 +19,7 @@ import java.util.function.Supplier; import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hbase.thirdparty.org.glassfish.hk2.api.Factory; /** @@ -34,9 +35,12 @@ public SupplierFactoryAdapter(Supplier supplier) { this.supplier = supplier; } - @Override public T provide() { + @Override + public T provide() { return supplier.get(); } - @Override public void dispose(T instance) { } + @Override + public void dispose(T instance) { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java index 7e3a79df9d3f..4eefd16ceb32 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -14,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.jmx; import java.io.IOException; @@ -36,33 +36,27 @@ import org.slf4j.LoggerFactory; /* - * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has - * been rewritten to be read only and to output in a JSON format so it is not - * really that close to the original. + * This servlet is based off of the JMXProxyServlet from Tomcat 7.0.14. It has been rewritten to be + * read only and to output in a JSON format so it is not really that close to the original. */ /** * Provides Read only web access to JMX. *

    - * This servlet generally will be placed under the /jmx URL for each - * HttpServer. It provides read only - * access to JMX metrics. The optional qry parameter - * may be used to query only a subset of the JMX Beans. This query - * functionality is provided through the - * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} - * method. + * This servlet generally will be placed under the /jmx URL for each HttpServer. It provides read + * only access to JMX metrics. The optional qry parameter may be used to query only a + * subset of the JMX Beans. This query functionality is provided through the + * {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)} method. *

    *

    - * For example http://.../jmx?qry=Hadoop:* will return - * all hadoop metrics exposed through JMX. + * For example http://.../jmx?qry=Hadoop:* will return all hadoop metrics exposed + * through JMX. *

    *

    - * The optional get parameter is used to query an specific - * attribute of a JMX bean. The format of the URL is - * http://.../jmx?get=MXBeanName::AttributeName + * The optional get parameter is used to query a specific attribute of a JMX bean. The + * format of the URL is http://.../jmx?get=MXBeanName::AttributeName *

    *

    - * For example - * + * For example * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId * will return the cluster id of the namenode mxbean. *

    @@ -72,8 +66,7 @@ * http://.../jmx?get=MXBeanName::*[RegExp1],*[RegExp2] *

    *

    - * For example - * + * For example *

    * http://../jmx?get=Hadoop:service=HBase,name=RegionServer,sub=Tables::[a-zA-z_0-9]*memStoreSize *

    @@ -82,17 +75,19 @@ *

    *
    *

    - * If the qry or the get parameter is not formatted - * correctly then a 400 BAD REQUEST http response code will be returned. + * If the qry or the get parameter is not formatted correctly then a 400 + * BAD REQUEST http response code will be returned. *

    *
    - * If a resouce such as a mbean or attribute can not be found, - * a 404 SC_NOT_FOUND http response code will be returned. + * If a resource such as an MBean or attribute cannot be found, a 404 SC_NOT_FOUND http response code + * will be returned. *
    *
    * The return format is JSON and in the form *

    - *
    
    + * 
    + * 
    + * 
      *  {
      *    "beans" : [
      *      {
    @@ -101,28 +96,18 @@
      *      }
      *    ]
      *  }
    - *  
    - *

    - * The servlet attempts to convert the the JMXBeans into JSON. Each - * bean's attributes will be converted to a JSON object member. - * - * If the attribute is a boolean, a number, a string, or an array - * it will be converted to the JSON equivalent. - * - * If the value is a {@link CompositeData} then it will be converted - * to a JSON object with the keys as the name of the JSON member and - * the value is converted following these same rules. - * - * If the value is a {@link TabularData} then it will be converted - * to an array of the {@link CompositeData} elements that it contains. - * - * All other objects will be converted to a string and output as such. - * - * The bean's name and modelerType will be returned for all beans. - * - * Optional paramater "callback" should be used to deliver JSONP response. + * + *

    + *

    + * The servlet attempts to convert the JMXBeans into JSON. Each bean's attributes will be + * converted to a JSON object member. If the attribute is a boolean, a number, a string, or an array + * it will be converted to the JSON equivalent. If the value is a {@link CompositeData} then it will + * be converted to a JSON object with the keys as the name of the JSON member and the value is + * converted following these same rules. If the value is a {@link TabularData} then it will be + * converted to an array of the {@link CompositeData} elements that it contains. All other objects + * will be converted to a string and output as such. The bean's name and modelerType will be + * returned for all beans. Optional parameter "callback" should be used to deliver JSONP response. *
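For readers of the reformatted javadoc above, a minimal stand-alone client for the /jmx endpoint could look like the sketch below. It uses only JDK HTTP classes; the host and port (localhost:16010) and the Hadoop:* query pattern are illustrative assumptions, not values taken from this patch.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

public class JmxQueryExample {
  public static void main(String[] args) throws Exception {
    // Query every bean matching the pattern; a "get=Bean::Attribute" parameter could be
    // used instead of "qry=..." to fetch a single attribute, as described in the javadoc above.
    String qry = URLEncoder.encode("Hadoop:*", StandardCharsets.UTF_8.name());
    URL url = new URL("http://localhost:16010/jmx?qry=" + qry);
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    try {
      // A malformed qry/get parameter yields 400; a missing bean or attribute yields 404.
      System.out.println("HTTP status: " + conn.getResponseCode());
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line); // JSON document with a top-level "beans" array
        }
      }
    } finally {
      conn.disconnect();
    }
  }
}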

    - * */ @InterfaceAudience.Private public class JMXJsonServlet extends HttpServlet { @@ -157,11 +142,8 @@ public void init() throws ServletException { /** * Process a GET request for the specified resource. - * - * @param request - * The servlet request we are processing - * @param response - * The servlet response we are creating + * @param request The servlet request we are processing + * @param response The servlet response we are creating */ @Override public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { @@ -199,8 +181,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro response.setStatus(HttpServletResponse.SC_BAD_REQUEST); return; } - if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), - splitStrings[1], description) != 0) { + if (beanWriter.write(this.mBeanServer, new ObjectName(splitStrings[0]), splitStrings[1], + description) != 0) { beanWriter.flush(); response.setStatus(HttpServletResponse.SC_BAD_REQUEST); } @@ -237,10 +219,9 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro } /** - * Verifies that the callback property, if provided, is purely alphanumeric. - * This prevents a malicious callback name (that is javascript code) from being - * returned by the UI to an unsuspecting user. - * + * Verifies that the callback property, if provided, is purely alphanumeric. This prevents a + * malicious callback name (that is javascript code) from being returned by the UI to an + * unsuspecting user. * @param callbackName The callback name, can be null. * @return The callback name * @throws IOException If the name is disallowed. diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java index 21667d779a34..cfda42dda9e8 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java @@ -1,18 +1,12 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java index 72cedddd686b..4d40c9fb3d26 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/StaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import java.io.IOException; import java.security.Principal; import java.util.HashMap; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -32,7 +31,6 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.http.FilterContainer; @@ -42,8 +40,8 @@ import org.slf4j.LoggerFactory; /** - * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) - * so that the web UI is usable for a secure cluster without authentication. + * Provides a servlet filter that pretends to authenticate a fake user (Dr.Who) so that the web UI + * is usable for a secure cluster without authentication. 
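As a usage illustration for the filter documented above, the sketch below wires it up through a Configuration object. The property name hbase.http.filter.initializers is an assumption mirroring HttpServer.FILTER_INITIALIZERS_PROPERTY used by the tests later in this patch, and hbase.http.staticuser.user is the HBASE_HTTP_STATIC_USER key declared in ServerConfigurationKeys above; treat it as a sketch rather than the canonical setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.http.lib.StaticUserWebFilter;

public class StaticUserFilterConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Ask the HTTP server to install the static-user filter at startup
    // (assumed property name, see note above).
    conf.set("hbase.http.filter.initializers", StaticUserWebFilter.class.getName());
    // Override the fake identity reported by the web UI (the javadoc above calls it Dr.Who).
    conf.set("hbase.http.staticuser.user", "hbase-web-ui");
    System.out.println("filters = " + conf.get("hbase.http.filter.initializers"));
    System.out.println("static user = " + conf.get("hbase.http.staticuser.user"));
  }
}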
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) public class StaticUserWebFilter extends FilterInitializer { @@ -53,17 +51,21 @@ public class StaticUserWebFilter extends FilterInitializer { static class User implements Principal { private final String name; + public User(String name) { this.name = name; } + @Override public String getName() { return name; } + @Override public int hashCode() { return name.hashCode(); } + @Override public boolean equals(Object other) { if (other == this) { @@ -73,6 +75,7 @@ public boolean equals(Object other) { } return ((User) other).name.equals(name); } + @Override public String toString() { return name; @@ -90,20 +93,19 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain - ) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { HttpServletRequest httpRequest = (HttpServletRequest) request; // if the user is already authenticated, don't override it if (httpRequest.getRemoteUser() != null) { chain.doFilter(request, response); } else { - HttpServletRequestWrapper wrapper = - new HttpServletRequestWrapper(httpRequest) { + HttpServletRequestWrapper wrapper = new HttpServletRequestWrapper(httpRequest) { @Override public Principal getUserPrincipal() { return user; } + @Override public String getRemoteUser() { return username; @@ -128,9 +130,7 @@ public void initFilter(FilterContainer container, Configuration conf) { String username = getUsernameFromConf(conf); options.put(HBASE_HTTP_STATIC_USER, username); - container.addFilter("static_user_filter", - StaticUserFilter.class.getName(), - options); + container.addFilter("static_user_filter", StaticUserFilter.class.getName(), options); } /** @@ -141,13 +141,12 @@ static String getUsernameFromConf(Configuration conf) { if (oldStyleUgi != null) { // We can't use the normal configuration deprecation mechanism here // since we need to split out the username from the configured UGI. - LOG.warn(DEPRECATED_UGI_KEY + " should not be used. Instead, use " + - HBASE_HTTP_STATIC_USER + "."); + LOG.warn( + DEPRECATED_UGI_KEY + " should not be used. Instead, use " + HBASE_HTTP_STATIC_USER + "."); String[] parts = oldStyleUgi.split(","); return parts[0]; } else { - return conf.get(HBASE_HTTP_STATIC_USER, - DEFAULT_HBASE_HTTP_STATIC_USER); + return conf.get(HBASE_HTTP_STATIC_USER, DEFAULT_HBASE_HTTP_STATIC_USER); } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java index 7bb9a0faa7d5..734534c33f94 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java @@ -1,36 +1,29 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** *

    - * This package provides user-selectable (via configuration) classes that add - * functionality to the web UI. They are configured as a list of classes in the - * configuration parameter hadoop.http.filter.initializers. + * This package provides user-selectable (via configuration) classes that add functionality to the + * web UI. They are configured as a list of classes in the configuration parameter + * hadoop.http.filter.initializers. *

    *
      - *
    • StaticUserWebFilter - An authorization plugin that makes all - * users a static configured user. + *
    • StaticUserWebFilter - An authorization plugin that makes all users a static configured + * user. *
    *

    * Copied from hadoop source code.
    * See https://issues.apache.org/jira/browse/HADOOP-10232 to know why *

    */ -@InterfaceAudience.LimitedPrivate({"HBase"}) +@InterfaceAudience.LimitedPrivate({ "HBase" }) @InterfaceStability.Unstable package org.apache.hadoop.hbase.http.lib; diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java index 611316d9ec67..ddcccc858689 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/log/LogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -74,9 +74,7 @@ public static void main(String[] args) throws Exception { * Valid command line options. */ private enum Operations { - GETLEVEL, - SETLEVEL, - UNKNOWN + GETLEVEL, SETLEVEL, UNKNOWN } private static void printUsage() { @@ -85,8 +83,7 @@ private static void printUsage() { } public static boolean isValidProtocol(String protocol) { - return ((protocol.equals(PROTOCOL_HTTP) || - protocol.equals(PROTOCOL_HTTPS))); + return ((protocol.equals(PROTOCOL_HTTP) || protocol.equals(PROTOCOL_HTTPS))); } static class CLI extends Configured implements Tool { @@ -116,8 +113,7 @@ public int run(String[] args) throws Exception { * @throws HadoopIllegalArgumentException if arguments are invalid. * @throws Exception if unable to connect */ - private void sendLogLevelRequest() - throws HadoopIllegalArgumentException, Exception { + private void sendLogLevelRequest() throws HadoopIllegalArgumentException, Exception { switch (operation) { case GETLEVEL: doGetLevel(); @@ -126,13 +122,11 @@ private void sendLogLevelRequest() doSetLevel(); break; default: - throw new HadoopIllegalArgumentException( - "Expect either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Expect either -getlevel or -setlevel"); } } - public void parseArguments(String[] args) throws - HadoopIllegalArgumentException { + public void parseArguments(String[] args) throws HadoopIllegalArgumentException { if (args.length == 0) { throw new HadoopIllegalArgumentException("No arguments specified"); } @@ -149,15 +143,13 @@ public void parseArguments(String[] args) throws nextArgIndex = parseProtocolArgs(args, nextArgIndex); break; default: - throw new HadoopIllegalArgumentException( - "Unexpected argument " + args[nextArgIndex]); + throw new HadoopIllegalArgumentException("Unexpected argument " + args[nextArgIndex]); } } // if operation is never specified in the arguments if (operation == Operations.UNKNOWN) { - throw new HadoopIllegalArgumentException( - "Must specify either -getlevel or -setlevel"); + throw new HadoopIllegalArgumentException("Must specify either -getlevel or -setlevel"); } // if protocol is unspecified, set it as http. 
@@ -166,8 +158,7 @@ public void parseArguments(String[] args) throws } } - private int parseGetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseGetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -getlevel command"); @@ -182,8 +173,7 @@ private int parseGetLevelArgs(String[] args, int index) throws return index + 3; } - private int parseSetLevelArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseSetLevelArgs(String[] args, int index) throws HadoopIllegalArgumentException { // fail if multiple operations are specified in the arguments if (operation != Operations.UNKNOWN) { throw new HadoopIllegalArgumentException("Redundant -setlevel command"); @@ -199,30 +189,25 @@ private int parseSetLevelArgs(String[] args, int index) throws return index + 4; } - private int parseProtocolArgs(String[] args, int index) throws - HadoopIllegalArgumentException { + private int parseProtocolArgs(String[] args, int index) throws HadoopIllegalArgumentException { // make sure only -protocol is specified if (protocol != null) { - throw new HadoopIllegalArgumentException( - "Redundant -protocol command"); + throw new HadoopIllegalArgumentException("Redundant -protocol command"); } // check number of arguments is sufficient if (index + 1 >= args.length) { - throw new HadoopIllegalArgumentException( - "-protocol needs one parameter"); + throw new HadoopIllegalArgumentException("-protocol needs one parameter"); } // check protocol is valid protocol = args[index + 1]; if (!isValidProtocol(protocol)) { - throw new HadoopIllegalArgumentException( - "Invalid protocol: " + protocol); + throw new HadoopIllegalArgumentException("Invalid protocol: " + protocol); } return index + 2; } /** * Send HTTP request to get log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. * @throws Exception if unable to connect */ @@ -232,20 +217,16 @@ private void doGetLevel() throws Exception { /** * Send HTTP request to set log level. - * * @throws HadoopIllegalArgumentException if arguments are invalid. * @throws Exception if unable to connect */ private void doSetLevel() throws Exception { - process(protocol + "://" + hostName + "/logLevel?log=" + className - + "&level=" + level); + process(protocol + "://" + hostName + "/logLevel?log=" + className + "&level=" + level); } /** - * Connect to the URL. Supports HTTP and supports SPNEGO - * authentication. It falls back to simple authentication if it fails to - * initiate SPNEGO. - * + * Connect to the URL. Supports HTTP and supports SPNEGO authentication. It falls back to simple + * authentication if it fails to initiate SPNEGO. * @param url the URL address of the daemon servlet * @return a connected connection * @throws Exception if it can not establish a connection. @@ -274,8 +255,7 @@ private HttpURLConnection connect(URL url) throws Exception { } /** - * Configures the client to send HTTP request to the URL. - * Supports SPENGO for authentication. + * Configures the client to send HTTP request to the URL. Supports SPENGO for authentication. 
* @param urlString URL and query string to the daemon's web UI * @throws Exception if unable to connect */ @@ -289,9 +269,10 @@ private void process(String urlString) throws Exception { // read from the servlet - try (InputStreamReader streamReader = - new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); - BufferedReader bufferedReader = new BufferedReader(streamReader)) { + try ( + InputStreamReader streamReader = + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8); + BufferedReader bufferedReader = new BufferedReader(streamReader)) { bufferedReader.lines().filter(Objects::nonNull).filter(line -> line.startsWith(MARKER)) .forEach(line -> System.out.println(TAG.matcher(line).replaceAll(""))); } catch (IOException ioe) { @@ -312,19 +293,16 @@ public static class Servlet extends HttpServlet { @Override public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { + throws ServletException, IOException { // Do the authorization - if (!HttpServer.hasAdministratorAccess(getServletContext(), request, - response)) { + if (!HttpServer.hasAdministratorAccess(getServletContext(), request, response)) { return; } // Disallow modification of the LogLevel if explicitly set to readonly - Configuration conf = (Configuration) getServletContext().getAttribute( - HttpServer.CONF_CONTEXT_ATTRIBUTE); + Configuration conf = + (Configuration) getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE); if (conf.getBoolean("hbase.master.ui.readonly", false)) { - sendError( - response, - HttpServletResponse.SC_FORBIDDEN, + sendError(response, HttpServletResponse.SC_FORBIDDEN, "Modification of HBase via the UI is disallowed in configuration."); return; } @@ -347,17 +325,13 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) if (logName != null) { out.println("

    Results:

    "); - out.println(MARKER - + "Submitted Log Name: " + logName + "
    "); + out.println(MARKER + "Submitted Log Name: " + logName + "
    "); Logger log = LoggerFactory.getLogger(logName); - out.println(MARKER - + "Log Class: " + log.getClass().getName() +"
    "); + out.println(MARKER + "Log Class: " + log.getClass().getName() + "
    "); if (level != null) { if (!isLogLevelChangeAllowed(logName, readOnlyLogLevels)) { - sendError( - response, - HttpServletResponse.SC_PRECONDITION_FAILED, + sendError(response, HttpServletResponse.SC_PRECONDITION_FAILED, "Modification of logger " + logName + " is disallowed in configuration."); return; } @@ -390,7 +364,7 @@ private boolean isLogLevelChangeAllowed(String logger, String[] readOnlyLogLevel } private void sendError(HttpServletResponse response, int code, String message) - throws IOException { + throws IOException { response.setStatus(code, message); response.sendError(code, message); } @@ -420,17 +394,18 @@ private static void process(Logger logger, String levelName, PrintWriter out) { if (levelName != null) { try { Log4jUtils.setLogLevel(logger.getName(), levelName); - out.println(MARKER + "
    " + "Setting Level to " + - levelName + " ...
    " + "
    "); + out.println(MARKER + "
    " + "Setting Level to " + + levelName + " ...
    " + "
    "); } catch (IllegalArgumentException e) { - out.println(MARKER + "
    " + "Bad level : " + levelName + - "
    " + "
    "); + out.println(MARKER + "
    " + "Bad level : " + levelName + + "
    " + "
    "); } } - out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + - "
    "); + out.println(MARKER + "Effective level: " + Log4jUtils.getEffectiveLevel(logger.getName()) + + "
    "); } } - private LogLevel() {} + private LogLevel() { + } } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java index f55e24baa952..d70b57755444 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/http/package-info.java @@ -1,19 +1,12 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** *

    diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java index def2611b3a5f..2b1aaa9aa968 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONBean.java @@ -1,12 +1,13 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -26,7 +27,6 @@ import java.util.Iterator; import java.util.Set; import java.util.regex.Pattern; - import javax.management.AttributeNotFoundException; import javax.management.InstanceNotFoundException; import javax.management.IntrospectionException; @@ -42,13 +42,13 @@ import javax.management.openmbean.CompositeData; import javax.management.openmbean.CompositeType; import javax.management.openmbean.TabularData; - -import org.apache.hbase.thirdparty.com.google.gson.Gson; -import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hbase.thirdparty.com.google.gson.Gson; +import org.apache.hbase.thirdparty.com.google.gson.stream.JsonWriter; + /** * Utility for doing JSON and MBeans. */ @@ -388,7 +388,7 @@ private static void writeObject(JsonWriter writer, Object value) throws IOExcept */ public static void dumpAllBeans() throws IOException, MalformedObjectNameException { try (PrintWriter writer = - new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) { + new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8))) { JSONBean dumper = new JSONBean(); try (JSONBean.Writer jsonBeanWriter = dumper.open(writer)) { MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer(); diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java index 6e155ae39616..3d57c8dcd3f6 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/JSONMetricUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -14,7 +14,7 @@ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. - * */ + */ package org.apache.hadoop.hbase.util; import java.beans.IntrospectionException; @@ -78,8 +78,8 @@ public static Object getValueFromMBean(ObjectName bean, String attribute) { try { value = mbServer.getAttribute(bean, attribute); } catch (Exception e) { - LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + - attribute + " " + e.getMessage()); + LOG.error("Unable to get value from MBean= " + bean.toString() + "for attribute=" + attribute + + " " + e.getMessage()); } return value; } diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java index 2b36c21c6167..c5d1a8a47e64 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/LogMonitoring.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java index 7ed09468cb67..fc1d523b0ef1 100644 --- a/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java +++ b/hbase-http/src/main/java/org/apache/hadoop/hbase/util/ProcessUtils.java @@ -20,9 +20,7 @@ import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,7 +31,8 @@ public final class ProcessUtils { private static Logger LOG = LoggerFactory.getLogger(ProcessUtils.class); - private ProcessUtils() { } + private ProcessUtils() { + } public static Integer getPid() { // JVM_PID is exported by bin/hbase run script diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java index 7f1223980e3d..43be7ccb4f4c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/HttpServerFunctionalTest.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http; import java.io.BufferedReader; @@ -38,8 +37,8 @@ import org.slf4j.LoggerFactory; /** - * This is a base class for functional tests of the {@link HttpServer}. - * The methods are static for other classes to import statically. + * This is a base class for functional tests of the {@link HttpServer}. The methods are static for + * other classes to import statically. */ public class HttpServerFunctionalTest extends Assert { private static final Logger LOG = LoggerFactory.getLogger(HttpServerFunctionalTest.class); @@ -52,11 +51,9 @@ public class HttpServerFunctionalTest extends Assert { private static final String TEST = "test"; /** - * Create but do not start the test webapp server. 
The test webapp dir is - * prepared/checked in advance. - * + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @return the server instance - * * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ @@ -66,16 +63,14 @@ public static HttpServer createTestServer() throws IOException { } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @param conf the server configuration to use * @return the server instance - * * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf) - throws IOException { + public static HttpServer createTestServer(Configuration conf) throws IOException { prepareTestWebapp(); return createServer(TEST, conf); } @@ -87,55 +82,50 @@ public static HttpServer createTestServer(Configuration conf, AccessControlList } /** - * Create but do not start the test webapp server. The test webapp dir is - * prepared/checked in advance. + * Create but do not start the test webapp server. The test webapp dir is prepared/checked in + * advance. * @param conf the server configuration to use * @return the server instance - * * @throws IOException if a problem occurs * @throws AssertionError if a condition was not met */ - public static HttpServer createTestServer(Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createTestServer(Configuration conf, String[] pathSpecs) + throws IOException { prepareTestWebapp(); return createServer(TEST, conf, pathSpecs); } public static HttpServer createTestServerWithSecurity(Configuration conf) throws IOException { - prepareTestWebapp(); - return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) - // InfoServer normally sets these for us - .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .build(); - } + prepareTestWebapp(); + return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) + // InfoServer normally sets these for us + .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).build(); + } - public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, AccessControlList acl) throws IOException { + public static HttpServer createTestServerWithSecurityAndAcl(Configuration conf, + AccessControlList acl) throws IOException { prepareTestWebapp(); return localServerBuilder(TEST).setFindPort(true).setConf(conf).setSecurityEnabled(true) // InfoServer normally sets these for us .setUsernameConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_PRINCIPAL_KEY) - .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY) - .setSecurityEnabled(true) - .setACL(acl) - .build(); + .setKeytabConfKey(HttpServer.HTTP_SPNEGO_AUTHENTICATION_KEYTAB_KEY).setSecurityEnabled(true) + .setACL(acl).build(); } /** - * Prepare the test webapp by creating the directory from the test properties - * fail if the directory cannot be created. + * Prepare the test webapp by creating the directory from the test properties fail if the + * directory cannot be created. 
* @throws AssertionError if a condition was not met */ protected static void prepareTestWebapp() { String webapps = System.getProperty(TEST_BUILD_WEBAPPS, BUILD_WEBAPPS_DIR); - File testWebappDir = new File(webapps + - File.separatorChar + TEST); + File testWebappDir = new File(webapps + File.separatorChar + TEST); try { if (!testWebappDir.exists()) { fail("Test webapp dir " + testWebappDir.getCanonicalPath() + " missing"); } - } - catch (IOException e) { + } catch (IOException e) { } } @@ -146,12 +136,10 @@ protected static void prepareTestWebapp() { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String host, int port) - throws IOException { + public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); return new HttpServer.Builder().setName(TEST) - .addEndpoint(URI.create("http://" + host + ":" + port)) - .setFindPort(true).build(); + .addEndpoint(URI.create("http://" + host + ":" + port)).setFindPort(true).build(); } /** @@ -163,6 +151,7 @@ public static HttpServer createServer(String host, int port) public static HttpServer createServer(String webapp) throws IOException { return localServerBuilder(webapp).setFindPort(true).build(); } + /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with @@ -170,8 +159,7 @@ public static HttpServer createServer(String webapp) throws IOException { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf) - throws IOException { + public static HttpServer createServer(String webapp, Configuration conf) throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).build(); } @@ -181,8 +169,7 @@ public static HttpServer createServer(String webapp, Configuration conf, } private static Builder localServerBuilder(String webapp) { - return new HttpServer.Builder().setName(webapp).addEndpoint( - URI.create("http://localhost:0")); + return new HttpServer.Builder().setName(webapp).addEndpoint(URI.create("http://localhost:0")); } /** @@ -193,17 +180,15 @@ private static Builder localServerBuilder(String webapp) { * @return the server * @throws IOException if it could not be created */ - public static HttpServer createServer(String webapp, Configuration conf, - String[] pathSpecs) throws IOException { + public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) + throws IOException { return localServerBuilder(webapp).setFindPort(true).setConf(conf).setPathSpec(pathSpecs) - .build(); + .build(); } /** * Create and start a server with the test webapp - * * @return the newly started server - * * @throws IOException on any failure * @throws AssertionError if a condition was not met */ @@ -230,11 +215,9 @@ public static void stop(HttpServer server) throws Exception { * @return a URL bonded to the base of the server * @throws MalformedURLException if the URL cannot be created. 
*/ - public static URL getServerURL(HttpServer server) - throws MalformedURLException { + public static URL getServerURL(HttpServer server) throws MalformedURLException { assertNotNull("No server", server); - return new URL("http://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + return new URL("http://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); } /** @@ -297,15 +280,14 @@ public static void access(String urlstring) throws IOException { URLConnection connection = url.openConnection(); connection.connect(); - try (BufferedReader in = new BufferedReader(new InputStreamReader( - connection.getInputStream(), StandardCharsets.UTF_8))){ - for(; in.readLine() != null;) { + try (BufferedReader in = new BufferedReader( + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8))) { + for (; in.readLine() != null;) { continue; } - } catch(IOException ioe) { + } catch (IOException ioe) { LOG.info("Got exception: ", ioe); } } - } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java index 1917655d3426..cbac5b193943 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestGlobalFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestGlobalFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -63,12 +63,12 @@ public void destroy() { @Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) - throws IOException, ServletException { + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -106,14 +106,11 @@ public void testServletFilter() throws Exception { final String outURL = "/static/a.out"; final String logURL = "/logs/a.log"; - final String[] urls = { - fsckURL, stacksURL, ajspURL, listPathsURL, dataURL, streamFile, rootURL, allURL, - outURL, logURL - }; + final String[] urls = { fsckURL, stacksURL, ajspURL, listPathsURL, 
dataURL, streamFile, rootURL, + allURL, outURL, logURL }; - //access the urls - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String url : urls) { access(prefix + url); @@ -124,7 +121,7 @@ public void testServletFilter() throws Exception { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String url : urls) { assertTrue(RECORDS.remove(url)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java index 0f4c4d5d2a14..85c11e9a648f 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHtmlQuoting.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,13 +31,14 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHtmlQuoting { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestHtmlQuoting.class); - @Test public void testNeedsQuoting() throws Exception { + @Test + public void testNeedsQuoting() throws Exception { assertTrue(HtmlQuoting.needsQuoting("abcde>")); assertTrue(HtmlQuoting.needsQuoting("")); assertEquals("&&&", HtmlQuoting.quoteHtmlChars("&&&")); @@ -58,18 +60,18 @@ public class TestHtmlQuoting { } private void runRoundTrip(String str) throws Exception { - assertEquals(str, - HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); + assertEquals(str, HtmlQuoting.unquoteHtmlChars(HtmlQuoting.quoteHtmlChars(str))); } - @Test public void testRoundtrip() throws Exception { + @Test + public void testRoundtrip() throws Exception { runRoundTrip(""); runRoundTrip("<>&'\""); runRoundTrip("ab>cd params = request.getParameterMap(); SortedSet keys = new TreeSet<>(params.keySet()); - for(String key: keys) { + for (String key : keys) { out.print(key); out.print(':'); String[] values = params.get(key); if (values.length > 0) { out.print(values[0]); - for(int i=1; i < values.length; ++i) { + for (int i = 1; i < values.length; ++i) { out.print(','); out.print(values[i]); } @@ -120,15 +122,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro @SuppressWarnings("serial") public static class EchoServlet extends HttpServlet { @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws IOException { + public void doGet(HttpServletRequest request, HttpServletResponse response) throws IOException { PrintWriter out = response.getWriter(); SortedSet sortedKeys = new TreeSet<>(); Enumeration keys = request.getParameterNames(); - while(keys.hasMoreElements()) { + while (keys.hasMoreElements()) { sortedKeys.add(keys.nextElement()); } - for(String key: sortedKeys) { + for (String key : sortedKeys) { out.print(key); out.print(':'); out.print(request.getParameter(key)); @@ -158,7 +159,8 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro } } - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { 
Configuration conf = new Configuration(); conf.setInt(HttpServer.HTTP_MAX_THREADS, MAX_THREADS); server = createTestServer(conf); @@ -166,14 +168,14 @@ public void doGet(HttpServletRequest request, HttpServletResponse response) thro server.addUnprivilegedServlet("echomap", "/echomap", EchoMapServlet.class); server.addUnprivilegedServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class); server.addUnprivilegedServlet("longheader", "/longheader", LongHeaderServlet.class); - server.addJerseyResourcePackage( - JerseyResource.class.getPackage().getName(), "/jersey/*"); + server.addJerseyResourcePackage(JerseyResource.class.getPackage().getName(), "/jersey/*"); server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } @@ -192,13 +194,13 @@ public void testMaxThreads() throws Exception { ready.countDown(); try { start.await(); - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); int serverThreads = server.webServer.getThreadPool().getThreads(); - assertTrue("More threads are started than expected, Server Threads count: " - + serverThreads, serverThreads <= MAX_THREADS); - LOG.info("Number of threads = " + serverThreads + - " which is less or equal than the max = " + MAX_THREADS); + assertTrue( + "More threads are started than expected, Server Threads count: " + serverThreads, + serverThreads <= MAX_THREADS); + LOG.info("Number of threads = " + serverThreads + + " which is less or equal than the max = " + MAX_THREADS); } catch (Exception e) { // do nothing } @@ -209,31 +211,30 @@ public void testMaxThreads() throws Exception { start.countDown(); } - @Test public void testEcho() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", - readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); + @Test + public void testEcho() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echo?a=b&c=d"))); + assertEquals("a:b\nc<:d\ne:>\n", readOutput(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } /** Test the echo map servlet that uses getParameterMap. */ - @Test public void testEchoMap() throws Exception { - assertEquals("a:b\nc:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); - assertEquals("a:b,>\nc<:d\n", - readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); + @Test + public void testEchoMap() throws Exception { + assertEquals("a:b\nc:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c=d"))); + assertEquals("a:b,>\nc<:d\n", readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); } /** - * Test that verifies headers can be up to 64K long. - * The test adds a 63K header leaving 1K for other headers. - * This is because the header buffer setting is for ALL headers, - * names and values included. */ - @Test public void testLongHeader() throws Exception { + * Test that verifies headers can be up to 64K long. The test adds a 63K header leaving 1K for + * other headers. This is because the header buffer setting is for ALL headers, names and values + * included. 
+ */ + @Test + public void testLongHeader() throws Exception { URL url = new URL(baseUrl, "/longheader"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); StringBuilder sb = new StringBuilder(); - for (int i = 0 ; i < 63 * 1024; i++) { + for (int i = 0; i < 63 * 1024; i++) { sb.append("a"); } conn.setRequestProperty("longheader", sb.toString()); @@ -244,14 +245,14 @@ public void testMaxThreads() throws Exception { public void testContentTypes() throws Exception { // Static CSS files should have text/css URL cssUrl = new URL(baseUrl, "/static/test.css"); - HttpURLConnection conn = (HttpURLConnection)cssUrl.openConnection(); + HttpURLConnection conn = (HttpURLConnection) cssUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/css", conn.getContentType()); // Servlets should have text/plain with proper encoding by default URL servletUrl = new URL(baseUrl, "/echo?a=b"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); @@ -259,14 +260,14 @@ public void testContentTypes() throws Exception { // We should ignore parameters for mime types - ie a parameter // ending in .css should not change mime type servletUrl = new URL(baseUrl, "/echo?a=b.css"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/plain;charset=utf-8", conn.getContentType()); // Servlets that specify text/html should get that content type servletUrl = new URL(baseUrl, "/htmlcontent"); - conn = (HttpURLConnection)servletUrl.openConnection(); + conn = (HttpURLConnection) servletUrl.openConnection(); conn.connect(); assertEquals(200, conn.getResponseCode()); assertEquals("text/html;charset=utf-8", conn.getContentType()); @@ -335,21 +336,20 @@ private static String readFully(final InputStream input) throws IOException { } /** - * Dummy filter that mimics as an authentication filter. Obtains user identity - * from the request parameter user.name. Wraps around the request so that - * request.getRemoteUser() returns the user identity. - * + * Dummy filter that mimics as an authentication filter. Obtains user identity from the request + * parameter user.name. Wraps around the request so that request.getRemoteUser() returns the user + * identity. */ public static class DummyServletFilter implements Filter { @Override - public void destroy() { } + public void destroy() { + } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain filterChain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain) + throws IOException, ServletException { final String userName = request.getParameter("user.name"); - ServletRequest requestModified = - new HttpServletRequestWrapper((HttpServletRequest) request) { + ServletRequest requestModified = new HttpServletRequestWrapper((HttpServletRequest) request) { @Override public String getRemoteUser() { return userName; @@ -359,12 +359,12 @@ public String getRemoteUser() { } @Override - public void init(FilterConfig arg0) { } + public void init(FilterConfig arg0) { + } } /** * FilterInitializer that initialized the DummyFilter. 
- * */ public static class DummyFilterInitializer extends FilterInitializer { public DummyFilterInitializer() { @@ -377,10 +377,8 @@ public void initFilter(FilterContainer container, Configuration conf) { } /** - * Access a URL and get the corresponding return Http status code. The URL - * will be accessed as the passed user, by sending user.name request - * parameter. - * + * Access a URL and get the corresponding return Http status code. The URL will be accessed as the + * passed user, by sending user.name request parameter. * @param urlstring The url to access * @param userName The user to perform access as * @return The HTTP response code @@ -389,7 +387,7 @@ public void initFilter(FilterContainer container, Configuration conf) { private static int getHttpStatusCode(String urlstring, String userName) throws IOException { URL url = new URL(urlstring + "?user.name=" + userName); System.out.println("Accessing " + url + " as user " + userName); - HttpURLConnection connection = (HttpURLConnection)url.openConnection(); + HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.connect(); return connection.getResponseCode(); } @@ -411,9 +409,8 @@ public List getGroups(String user) { } /** - * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics - * servlets, when authentication filters are set, but authorization is not - * enabled. + * Verify the access for /logs, /stacks, /conf, /logLevel and /metrics servlets, when + * authentication filters are set, but authorization is not enabled. */ @Test @Ignore @@ -421,10 +418,9 @@ public void testDisabledAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); // Authorization is disabled by default - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); @@ -434,33 +430,29 @@ public void testDisabledAuthorizationOfDefaultServlets() throws Exception { .addEndpoint(new URI("http://localhost:0")).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } } myServer.stop(); } /** - * Verify the administrator access for /logs, /stacks, /conf, /logLevel and - * /metrics servlets. + * Verify the administrator access for /logs, /stacks, /conf, /logLevel and /metrics servlets. 
*/ @Test @Ignore public void testAuthorizationOfDefaultServlets() throws Exception { Configuration conf = new Configuration(); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, - true); - conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, - true); - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - DummyFilterInitializer.class.getName()); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); + conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, DummyFilterInitializer.class.getName()); conf.set(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, - MyGroupsProvider.class.getName()); + MyGroupsProvider.class.getName()); Groups.getUserToGroupsMappingService(conf); MyGroupsProvider.clearMapping(); MyGroupsProvider.mapping.put("userA", Collections.singletonList("groupA")); @@ -475,15 +467,14 @@ public void testAuthorizationOfDefaultServlets() throws Exception { myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; + String serverURL = + "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)) + "/"; for (String servlet : new String[] { "conf", "logs", "stacks", "logLevel", "metrics" }) { for (String user : new String[] { "userA", "userB", "userC", "userD" }) { - assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL - + servlet, user)); + assertEquals(HttpURLConnection.HTTP_OK, getHttpStatusCode(serverURL + servlet, user)); } - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, getHttpStatusCode( - serverURL + servlet, "userE")); + assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, + getHttpStatusCode(serverURL + servlet, "userE")); } myServer.stop(); } @@ -494,8 +485,8 @@ public void testRequestQuoterWithNull() { Mockito.doReturn(null).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertNull("It should return null " - + "when there are no values for the parameter", parameterValues); + Assert.assertNull("It should return null " + "when there are no values for the parameter", + parameterValues); } @Test @@ -505,16 +496,16 @@ public void testRequestQuoterWithNotNull() { Mockito.doReturn(values).when(request).getParameterValues("dummy"); RequestQuoter requestQuoter = new RequestQuoter(request); String[] parameterValues = requestQuoter.getParameterValues("dummy"); - Assert.assertTrue("It should return Parameter Values", Arrays.equals( - values, parameterValues)); + Assert.assertTrue("It should return Parameter Values", Arrays.equals(values, parameterValues)); } @SuppressWarnings("unchecked") private static Map parse(String jsonString) { - return (Map)JSON.parse(jsonString); + return (Map) JSON.parse(jsonString); } - @Test public void testJersey() throws Exception { + @Test + public void testJersey() throws Exception { LOG.info("BEGIN testJersey()"); final String js = readOutput(new URL(baseUrl, "/jersey/foo?op=bar")); final Map m = parse(js); @@ -535,33 +526,33 @@ public void testHasAdministratorAccess() throws Exception { Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //authorization OFF + // authorization OFF 
Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NULL + // authorization ON & user NULL response = Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NULL + // authorization ON & user NOT NULL & ACLs NULL response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); - //authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user not in ACLs response = Mockito.mock(HttpServletResponse.class); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_FORBIDDEN), - Mockito.anyString()); + Mockito.anyString()); - //authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs + // authorization ON & user NOT NULL & ACLs NOT NULL & user in in ACLs response = Mockito.mock(HttpServletResponse.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(true); + Mockito.when(acls.isUserAllowed(Mockito. any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); @@ -575,14 +566,14 @@ public void testRequiresAuthorizationAccess() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - //requires admin access to instrumentation, FALSE by default + // requires admin access to instrumentation, FALSE by default Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response)); - //requires admin access to instrumentation, TRUE + // requires admin access to instrumentation, TRUE conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); AccessControlList acls = Mockito.mock(AccessControlList.class); - Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false); + Mockito.when(acls.isUserAllowed(Mockito. 
any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response)); } @@ -611,8 +602,7 @@ public void testBindAddress() throws Exception { } } - private HttpServer checkBindAddress(String host, int port, boolean findPort) - throws Exception { + private HttpServer checkBindAddress(String host, int port, boolean findPort) throws Exception { HttpServer server = createServer(host, port); try { // not bound, ephemeral should return requested port (0 for ephemeral) @@ -645,14 +635,12 @@ public void testXFrameHeaderSameOrigin() throws Exception { conf.set("hbase.http.filter.xframeoptions.mode", "SAMEORIGIN"); HttpServer myServer = new HttpServer.Builder().setName("test") - .addEndpoint(new URI("http://localhost:0")) - .setFindPort(true).setConf(conf).build(); + .addEndpoint(new URI("http://localhost:0")).setFindPort(true).setConf(conf).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.addUnprivilegedServlet("echo", "/echo", EchoServlet.class); myServer.start(); - String serverURL = "http://" - + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); + String serverURL = "http://" + NetUtils.getHostPortString(myServer.getConnectorAddress(0)); URL url = new URL(new URL(serverURL), "/echo?a=b&c=d"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java index ce0d6d6bc327..e2513bcfe76c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerLifecycle.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerLifecycle extends HttpServerFunctionalTest { @ClassRule @@ -33,8 +33,8 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { HBaseClassTestRule.forClass(TestHttpServerLifecycle.class); /** - * Check that a server is alive by probing the {@link HttpServer#isAlive()} method - * and the text of its toString() description + * Check that a server is alive by probing the {@link HttpServer#isAlive()} method and the text of + * its toString() description * @param server server */ private void assertAlive(HttpServer server) { @@ -49,16 +49,17 @@ private void assertNotLive(HttpServer server) { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testCreatedServerIsNotAlive() throws Throwable { HttpServer server = createTestServer(); assertNotLive(server); } - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStopUnstartedServer() throws Throwable { HttpServer server = createTestServer(); stop(server); @@ -66,10 +67,10 @@ public void testStopUnstartedServer() throws Throwable { /** * Test that the server is alive once started - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStartedServerIsAlive() throws Throwable { HttpServer server = null; server = createTestServer(); @@ -87,15 +88,15 @@ public void testStartedServerIsAlive() throws Throwable { private void assertToStringContains(HttpServer server, String text) { String description = server.toString(); assertTrue("Did not find \"" + text + "\" in \"" + description + "\"", - description.contains(text)); + description.contains(text)); } /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppedServerIsNotAlive() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -105,10 +106,10 @@ public void testStoppedServerIsNotAlive() throws Throwable { /** * Test that the server is not alive once stopped - * * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testStoppingTwiceServerIsAllowed() throws Throwable { HttpServer server = createAndStartTestServer(); assertAlive(server); @@ -120,11 +121,10 @@ public void testStoppingTwiceServerIsAllowed() throws Throwable { /** * Test that the server is alive once started - * - * @throws Throwable - * on failure + * @throws Throwable on failure */ - @Ignore ("Hangs on occasion; see HBASE-14430") @Test + @Ignore("Hangs on occasion; see HBASE-14430") + @Test public void testWepAppContextAfterServerStop() throws Throwable { HttpServer server = null; String key = "test.attribute.key"; diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java index 
11a7db2fbf05..8a86c7f3833e 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestHttpServerWebapps.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ /** * Test webapp loading */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestHttpServerWebapps extends HttpServerFunctionalTest { @ClassRule @@ -61,8 +61,8 @@ public void testValidServerResource() throws Throwable { public void testMissingServerResource() throws Throwable { try { HttpServer server = createServer("NoSuchWebapp"); - //should not have got here. - //close the server + // should not have got here. + // close the server String serverDescription = server.toString(); stop(server); fail("Expected an exception, got " + serverDescription); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java index 7737b298b6a6..36579a651c1d 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestPathFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPathFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -62,13 +62,13 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - String uri = ((HttpServletRequest)request).getRequestURI(); + String uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); RECORDS.add(uri); chain.doFilter(request, response); @@ -76,7 +76,8 @@ public void doFilter(ServletRequest request, ServletResponse response, /** Configuration for RecordingFilter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -89,9 +90,8 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testPathSpecFilters() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - RecordingFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, RecordingFilter.Initializer.class.getName()); String[] pathSpecs = { "/path", "/path/*" }; HttpServer http = createTestServer(conf, pathSpecs); http.start(); @@ -105,12 +105,11 @@ public void testPathSpecFilters() throws Exception { final String allURL = "/*"; final String[] filteredUrls = { baseURL, baseSlashURL, 
addedURL, addedSlashURL, longURL }; - final String[] notFilteredUrls = {rootURL, allURL}; + final String[] notFilteredUrls = { rootURL, allURL }; // access the urls and verify our paths specs got added to the // filters - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (String filteredUrl : filteredUrls) { access(prefix + filteredUrl); @@ -124,7 +123,7 @@ public void testPathSpecFilters() throws Exception { LOG.info("RECORDS = " + RECORDS); - //verify records + // verify records for (String filteredUrl : filteredUrls) { assertTrue(RECORDS.remove(filteredUrl)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java index 7723e6e78871..498bf7c6d3be 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProfileOutputServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,14 +22,13 @@ import java.util.Arrays; import java.util.List; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestProfileOutputServlet { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -37,8 +36,8 @@ public class TestProfileOutputServlet { @Test public void testSanitization() { - List good = Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", - "host=host-1.example.com"); + List good = + Arrays.asList("abcd", "key=value", "key1=value&key2=value2", "", "host=host-1.example.com"); for (String input : good) { assertEquals(input, ProfileOutputServlet.sanitize(input)); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java index ed98a2f5a7f4..1da537d8d0bd 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestProxyUserSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,10 +23,8 @@ import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.Set; - import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosTicket; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; @@ -72,7 +70,7 @@ * HttpComponents to verify that the doas= mechanicsm works, and that the proxyuser settings are * observed. 
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -94,7 +92,6 @@ public class TestProxyUserSpnegoHttpServer extends HttpServerFunctionalTest { private static File privilegedKeytab; private static File privileged2Keytab; - @BeforeClass public static void setupServer() throws Exception { Configuration conf = new Configuration(); @@ -132,7 +129,7 @@ public static void setupServer() throws Exception { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -159,7 +156,6 @@ private static void setupUser(SimpleKdcServer kdc, File keytab, String principal kdc.exportPrincipal(principal, keytab); } - protected static Configuration buildSpnegoConfiguration(Configuration conf, String serverPrincipal, File serverKeytab) { KerberosName.setRules("DEFAULT"); @@ -182,13 +178,13 @@ protected static Configuration buildSpnegoConfiguration(Configuration conf, } /** - * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI - * which are meant only for administrators. + * Builds an ACL that will restrict the users who can issue commands to endpoints on the UI which + * are meant only for administrators. */ public static AccessControlList buildAdminAcl(Configuration conf) { final String userGroups = conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_USERS_KEY, null); - final String adminGroups = conf.get( - HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); + final String adminGroups = + conf.get(HttpServer.HTTP_SPNEGO_AUTHENTICATION_ADMIN_GROUPS_KEY, null); if (userGroups == null && adminGroups == null) { // Backwards compatibility - if the user doesn't have anything set, allow all users in. 
return new AccessControlList("*", null); @@ -198,20 +194,23 @@ public static AccessControlList buildAdminAcl(Configuration conf) { @Test public void testProxyAllowed() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_OK, null); } @Test public void testProxyDisallowedForUnprivileged() throws Exception { - testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 User unprivileged is unauthorized to access this page."); + testProxy(WHEEL_PRINCIPAL, UNPRIVILEGED_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 User unprivileged is unauthorized to access this page."); } @Test public void testProxyDisallowedForNotSudoAble() throws Exception { - testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, "403 Forbidden"); + testProxy(WHEEL_PRINCIPAL, PRIVILEGED2_PRINCIPAL, HttpURLConnection.HTTP_FORBIDDEN, + "403 Forbidden"); } - public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) throws Exception { + public void testProxy(String clientPrincipal, String doAs, int responseCode, String statusLine) + throws Exception { // Create the subject for the client final Subject clientSubject = JaasKrbUtil.loginUsingKeytab(WHEEL_PRINCIPAL, wheelKeytab); final Set clientPrincipals = clientSubject.getPrincipals(); @@ -221,7 +220,7 @@ public void testProxy(String clientPrincipal, String doAs, int responseCode, Str // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -231,34 +230,32 @@ public void testProxy(String clientPrincipal, String doAs, int responseCode, Str // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); - - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - - URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); - - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = 
GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); + + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + + URL url = new URL(getServerURL(server), "/echo?doAs=" + doAs + "&a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); @@ -266,8 +263,8 @@ public HttpResponse run() throws Exception { if (responseCode == HttpURLConnection.HTTP_OK) { assertTrue(EntityUtils.toString(resp.getEntity()).trim().contains("a:b")); } else { - assertTrue(resp.getStatusLine().toString().contains(statusLine) || - EntityUtils.toString(resp.getEntity()).contains(statusLine)); + assertTrue(resp.getStatusLine().toString().contains(statusLine) + || EntityUtils.toString(resp.getEntity()).contains(statusLine)); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java index 2b72793a690f..14f6cb86fd37 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSSLHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,19 +45,18 @@ import org.slf4j.LoggerFactory; /** - * This testcase issues SSL certificates configures the HttpServer to serve - * HTTPS using the created certficates and calls an echo servlet using the - * corresponding HTTPS URL. + * This testcase issues SSL certificates configures the HttpServer to serve HTTPS using the created + * certficates and calls an echo servlet using the corresponding HTTPS URL. 
*/ -@Category({MiscTests.class, MediumTests.class}) +@Category({ MiscTests.class, MediumTests.class }) public class TestSSLHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestSSLHttpServer.class); - private static final String BASEDIR = System.getProperty("test.build.dir", - "target/test-dir") + "/" + TestSSLHttpServer.class.getSimpleName(); + private static final String BASEDIR = System.getProperty("test.build.dir", "target/test-dir") + + "/" + TestSSLHttpServer.class.getSimpleName(); private static final Logger LOG = LoggerFactory.getLogger(TestSSLHttpServer.class); private static Configuration serverConf; @@ -91,22 +90,20 @@ public static void setup() throws Exception { clientSslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, clientConf); clientSslFactory.init(); - server = new HttpServer.Builder() - .setName("test") - .addEndpoint(new URI("https://localhost")) - .setConf(serverConf) - .keyPassword(HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", - null)) - .keyStore(serverConf.get("ssl.server.keystore.location"), - HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.password", null), - clientConf.get("ssl.server.keystore.type", "jks")) - .trustStore(serverConf.get("ssl.server.truststore.location"), - HBaseConfiguration.getPassword(serverConf, "ssl.server.truststore.password", null), - serverConf.get("ssl.server.truststore.type", "jks")).build(); + server = new HttpServer.Builder().setName("test").addEndpoint(new URI("https://localhost")) + .setConf(serverConf) + .keyPassword( + HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.keypassword", null)) + .keyStore(serverConf.get("ssl.server.keystore.location"), + HBaseConfiguration.getPassword(serverConf, "ssl.server.keystore.password", null), + clientConf.get("ssl.server.keystore.type", "jks")) + .trustStore(serverConf.get("ssl.server.truststore.location"), + HBaseConfiguration.getPassword(serverConf, "ssl.server.truststore.password", null), + serverConf.get("ssl.server.truststore.type", "jks")) + .build(); server.addUnprivilegedServlet("echo", "/echo", TestHttpServer.EchoServlet.class); server.start(); - baseUrl = new URL("https://" - + NetUtils.getHostPortString(server.getConnectorAddress(0))); + baseUrl = new URL("https://" + NetUtils.getHostPortString(server.getConnectorAddress(0))); LOG.info("HTTP server started: " + baseUrl); } @@ -121,8 +118,7 @@ public static void cleanup() throws Exception { @Test public void testEcho() throws Exception { assertEquals("a:b\nc:d\n", readOut(new URL(baseUrl, "/echo?a=b&c=d"))); - assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, - "/echo?a=b&c<=d&e=>"))); + assertEquals("a:b\nc<:d\ne:>\n", readOut(new URL(baseUrl, "/echo?a=b&c<=d&e=>"))); } @Test diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java index 6b9d2c341ed7..a9e1028923ab 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSecurityHeadersFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({HttpServerFunctionalTest.class, MediumTests.class}) +@Category({ HttpServerFunctionalTest.class, MediumTests.class }) public class TestSecurityHeadersFilter { private static URL baseUrl; private HttpServer http; @@ -62,28 +62,27 @@ public void testDefaultValues() throws Exception { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'X-Content-Type-Options' is missing", - conn.getHeaderField("X-Content-Type-Options"), is(not((String)null))); + conn.getHeaderField("X-Content-Type-Options"), is(not((String) null))); assertThat(conn.getHeaderField("X-Content-Type-Options"), equalTo("nosniff")); - assertThat("Header 'X-XSS-Protection' is missing", - conn.getHeaderField("X-XSS-Protection"), is(not((String)null))); + assertThat("Header 'X-XSS-Protection' is missing", conn.getHeaderField("X-XSS-Protection"), + is(not((String) null))); assertThat("Header 'X-XSS-Protection' has invalid value", - conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); + conn.getHeaderField("X-XSS-Protection"), equalTo("1; mode=block")); - assertThat("Header 'Strict-Transport-Security' should be missing from response," + - "but it's present", - conn.getHeaderField("Strict-Transport-Security"), is((String)null)); - assertThat("Header 'Content-Security-Policy' should be missing from response," + - "but it's present", - conn.getHeaderField("Content-Security-Policy"), is((String)null)); + assertThat( + "Header 'Strict-Transport-Security' should be missing from response," + "but it's present", + conn.getHeaderField("Strict-Transport-Security"), is((String) null)); + assertThat( + "Header 'Content-Security-Policy' should be missing from response," + "but it's present", + conn.getHeaderField("Content-Security-Policy"), is((String) null)); } @Test public void testHstsAndCspSettings() throws IOException { Configuration conf = new Configuration(); - conf.set("hbase.http.filter.hsts.value", - "max-age=63072000;includeSubDomains;preload"); + conf.set("hbase.http.filter.hsts.value", "max-age=63072000;includeSubDomains;preload"); conf.set("hbase.http.filter.csp.value", - "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); + "default-src https: data: 'unsafe-inline' 'unsafe-eval'"); http = createTestServer(conf); http.start(); baseUrl = getServerURL(http); @@ -93,15 +92,15 @@ public void testHstsAndCspSettings() throws IOException { assertThat(conn.getResponseCode(), equalTo(HttpURLConnection.HTTP_OK)); assertThat("Header 'Strict-Transport-Security' is missing from Rest response", - conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String)null))); + conn.getHeaderField("Strict-Transport-Security"), Is.is(not((String) null))); assertThat("Header 'Strict-Transport-Security' has invalid value", - conn.getHeaderField("Strict-Transport-Security"), - IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); + conn.getHeaderField("Strict-Transport-Security"), + IsEqual.equalTo("max-age=63072000;includeSubDomains;preload")); assertThat("Header 'Content-Security-Policy' is missing from Rest response", - conn.getHeaderField("Content-Security-Policy"), Is.is(not((String)null))); + conn.getHeaderField("Content-Security-Policy"), Is.is(not((String) null))); assertThat("Header 'Content-Security-Policy' has invalid value", - conn.getHeaderField("Content-Security-Policy"), - 
IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); + conn.getHeaderField("Content-Security-Policy"), + IsEqual.equalTo("default-src https: data: 'unsafe-inline' 'unsafe-eval'")); } } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java index 1e9a2861c9ef..699ccbc2939a 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestServletFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -42,7 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestServletFilter extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -66,20 +65,21 @@ public void destroy() { } @Override - public void doFilter(ServletRequest request, ServletResponse response, - FilterChain chain) throws IOException, ServletException { + public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) + throws IOException, ServletException { if (filterConfig == null) { return; } - uri = ((HttpServletRequest)request).getRequestURI(); + uri = ((HttpServletRequest) request).getRequestURI(); LOG.info("filtering " + uri); chain.doFilter(request, response); } /** Configuration for the filter */ static public class Initializer extends FilterInitializer { - public Initializer() {} + public Initializer() { + } @Override public void initFilter(FilterContainer container, Configuration conf) { @@ -90,22 +90,21 @@ public void initFilter(FilterContainer container, Configuration conf) { private static void assertExceptionContains(String string, Throwable t) { String msg = t.getMessage(); - Assert.assertTrue( - "Expected to find '" + string + "' but got unexpected exception:" - + StringUtils.stringifyException(t), msg.contains(string)); + Assert.assertTrue("Expected to find '" + string + "' but got unexpected exception:" + + StringUtils.stringifyException(t), + msg.contains(string)); } @Test @Ignore - //From stack + // From stack // Its a 'foreign' test, one that came in from hadoop when we copy/pasted http // It's second class. 
Could comment it out if only failing test (as per @nkeywal – sort of) public void testServletFilter() throws Exception { Configuration conf = new Configuration(); - //start an http server with CountingFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - SimpleFilter.Initializer.class.getName()); + // start an http server with CountingFilter + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, SimpleFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); http.start(); @@ -115,23 +114,22 @@ public void testServletFilter() throws Exception { final String logURL = "/logs/a.log"; final String hadooplogoURL = "/static/hadoop-logo.jpg"; - final String[] urls = {fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL}; + final String[] urls = { fsckURL, stacksURL, ajspURL, logURL, hadooplogoURL }; final Random rand = ThreadLocalRandom.current(); final int[] sequence = new int[50]; - //generate a random sequence and update counts - for(int i = 0; i < sequence.length; i++) { + // generate a random sequence and update counts + for (int i = 0; i < sequence.length; i++) { sequence[i] = rand.nextInt(urls.length); } - //access the urls as the sequence - final String prefix = "http://" - + NetUtils.getHostPortString(http.getConnectorAddress(0)); + // access the urls as the sequence + final String prefix = "http://" + NetUtils.getHostPortString(http.getConnectorAddress(0)); try { for (int aSequence : sequence) { access(prefix + urls[aSequence]); - //make sure everything except fsck get filtered + // make sure everything except fsck get filtered if (aSequence == 0) { assertNull(uri); } else { @@ -166,8 +164,7 @@ public void initFilter(FilterContainer container, Configuration conf) { public void testServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); // start an http server with ErrorFilter - conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, - ErrorFilter.Initializer.class.getName()); + conf.set(HttpServer.FILTER_INITIALIZERS_PROPERTY, ErrorFilter.Initializer.class.getName()); HttpServer http = createTestServer(conf); try { http.start(); @@ -178,17 +175,15 @@ public void testServletFilterWhenInitThrowsException() throws Exception { } /** - * Similar to the above test case, except that it uses a different API to add the - * filter. Regression test for HADOOP-8786. + * Similar to the above test case, except that it uses a different API to add the filter. + * Regression test for HADOOP-8786. */ @Test - public void testContextSpecificServletFilterWhenInitThrowsException() - throws Exception { + public void testContextSpecificServletFilterWhenInitThrowsException() throws Exception { Configuration conf = new Configuration(); HttpServer http = createTestServer(conf); - HttpServer.defineFilter(http.webAppContext, - "ErrorFilter", ErrorFilter.class.getName(), - null, null); + HttpServer.defineFilter(http.webAppContext, "ErrorFilter", ErrorFilter.class.getName(), null, + null); try { http.start(); fail("expecting exception"); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java index 28e4fcf093b1..01c9a853cfb6 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/TestSpnegoHttpServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -69,7 +69,7 @@ * Test class for SPNEGO authentication on the HttpServer. Uses Kerby's MiniKDC and Apache * HttpComponents to verify that a simple Servlet is reachable via SPNEGO and unreachable w/o. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestSpnegoHttpServer extends HttpServerFunctionalTest { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -114,7 +114,7 @@ public static void setupServer() throws Exception { server.start(); baseUrl = getServerURL(server); - LOG.info("HTTP server started: "+ baseUrl); + LOG.info("HTTP server started: " + baseUrl); } @AfterClass @@ -174,7 +174,7 @@ public void testAllowedClient() throws Exception { // Get a TGT for the subject (might have many, different encryption types). The first should // be the default encryption type. Set privateCredentials = - clientSubject.getPrivateCredentials(KerberosTicket.class); + clientSubject.getPrivateCredentials(KerberosTicket.class); assertFalse(privateCredentials.isEmpty()); KerberosTicket tgt = privateCredentials.iterator().next(); assertNotNull(tgt); @@ -184,34 +184,32 @@ public void testAllowedClient() throws Exception { // Run this code, logged in as the subject (the client) HttpResponse resp = Subject.doAs(clientSubject, new PrivilegedExceptionAction() { - @Override - public HttpResponse run() throws Exception { - // Logs in with Kerberos via GSS - GSSManager gssManager = GSSManager.getInstance(); - // jGSS Kerberos login constant - Oid oid = new Oid("1.2.840.113554.1.2.2"); - GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); - GSSCredential credential = gssManager.createCredential(gssClient, - GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); - - HttpClientContext context = HttpClientContext.create(); - Lookup authRegistry = RegistryBuilder.create() - .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)) - .build(); - - HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry) - .build(); - BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); - - URL url = new URL(getServerURL(server), "/echo?a=b"); - context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); - context.setCredentialsProvider(credentialsProvider); - context.setAuthSchemeRegistry(authRegistry); - - HttpGet get = new HttpGet(url.toURI()); - return client.execute(get, context); - } + @Override + public HttpResponse run() throws Exception { + // Logs in with Kerberos via GSS + GSSManager gssManager = GSSManager.getInstance(); + // jGSS Kerberos login constant + Oid oid = new Oid("1.2.840.113554.1.2.2"); + GSSName gssClient = gssManager.createName(principalName, GSSName.NT_USER_NAME); + GSSCredential credential = gssManager.createCredential(gssClient, + GSSCredential.DEFAULT_LIFETIME, oid, GSSCredential.INITIATE_ONLY); + + HttpClientContext context = HttpClientContext.create(); + Lookup authRegistry = RegistryBuilder. 
create() + .register(AuthSchemes.SPNEGO, new SPNegoSchemeFactory(true, true)).build(); + + HttpClient client = HttpClients.custom().setDefaultAuthSchemeRegistry(authRegistry).build(); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(AuthScope.ANY, new KerberosCredentials(credential)); + + URL url = new URL(getServerURL(server), "/echo?a=b"); + context.setTargetHost(new HttpHost(url.getHost(), url.getPort())); + context.setCredentialsProvider(credentialsProvider); + context.setAuthSchemeRegistry(authRegistry); + + HttpGet get = new HttpGet(url.toURI()); + return client.execute(get, context); + } }); assertNotNull(resp); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java index ac2ef8f66497..8ba4f72ce71d 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/conf/TestConfServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,8 +44,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * Basic test case that the ConfServlet can write configuration - * to its output in XML and JSON format. + * Basic test case that the ConfServlet can write configuration to its output in XML and JSON + * format. */ @Category({ MiscTests.class, SmallTests.class }) public class TestConfServlet { @@ -74,15 +74,14 @@ public void testWriteJson() throws Exception { programSet.add("programatically"); programSet.add("programmatically"); Object parsed = JSON.parse(json); - Object[] properties = ((Map)parsed).get("properties"); + Object[] properties = ((Map) parsed).get("properties"); for (Object o : properties) { - Map propertyInfo = (Map)o; - String key = (String)propertyInfo.get("key"); - String val = (String)propertyInfo.get("value"); - String resource = (String)propertyInfo.get("resource"); + Map propertyInfo = (Map) o; + String key = (String) propertyInfo.get("key"); + String val = (String) propertyInfo.get("value"); + String resource = (String) propertyInfo.get("resource"); System.err.println("k: " + key + " v: " + val + " r: " + resource); - if (TEST_KEY.equals(key) && TEST_VAL.equals(val) - && programSet.contains(resource)) { + if (TEST_KEY.equals(key) && TEST_VAL.equals(val) && programSet.contains(resource)) { foundSetting = true; } } @@ -95,8 +94,7 @@ public void testWriteXml() throws Exception { ConfServlet.writeResponse(getTestConf(), sw, "xml"); String xml = sw.toString(); - DocumentBuilderFactory docBuilderFactory - = DocumentBuilderFactory.newInstance(); + DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory.newInstance(); DocumentBuilder builder = docBuilderFactory.newDocumentBuilder(); Document doc = builder.parse(new InputSource(new StringReader(xml))); NodeList nameNodes = doc.getElementsByTagName("name"); @@ -107,7 +105,7 @@ public void testWriteXml() throws Exception { System.err.println("xml key: " + key); if (TEST_KEY.equals(key)) { foundSetting = true; - Element propertyElem = (Element)nameNode.getParentNode(); + Element propertyElem = (Element) nameNode.getParentNode(); String val = propertyElem.getElementsByTagName("value").item(0).getTextContent(); assertEquals(TEST_VAL, val); } diff --git 
a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java index e907a3260b0b..bef51e937b27 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/jmx/TestJMXJsonServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJMXJsonServlet extends HttpServerFunctionalTest { @ClassRule @@ -47,7 +47,8 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { private static HttpServer server; private static URL baseUrl; - @BeforeClass public static void setup() throws Exception { + @BeforeClass + public static void setup() throws Exception { // Eclipse doesn't pick this up correctly from the plugin // configuration in the pom. System.setProperty(HttpServerFunctionalTest.TEST_BUILD_WEBAPPS, "target/test-classes/webapps"); @@ -56,68 +57,67 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest { baseUrl = getServerURL(server); } - @AfterClass public static void cleanup() throws Exception { + @AfterClass + public static void cleanup() throws Exception { server.stop(); } public static void assertReFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertTrue("'"+p+"' does not match "+value, m.find()); + assertTrue("'" + p + "' does not match " + value, m.find()); } public static void assertNotFind(String re, String value) { Pattern p = Pattern.compile(re); Matcher m = p.matcher(value); - assertFalse("'"+p+"' should not match "+value, m.find()); + assertFalse("'" + p + "' should not match " + value, m.find()); } - @Test public void testQuery() throws Exception { + @Test + public void testQuery() throws Exception { String result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Runtime")); - LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Runtime RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Runtime\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory")); - LOG.info("/jmx?qry=java.lang:type=Memory RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"modelerType\"", result); result = readOutput(new URL(baseUrl, "/jmx")); - LOG.info("/jmx RESULT: "+result); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); // test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); // negative test to get an attribute of a mbean - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::")); - LOG.info("/jmx RESULT: "+result); + result 
= readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::")); + LOG.info("/jmx RESULT: " + result); assertReFind("\"ERROR\"", result); // test to get JSONP result result = readOutput(new URL(baseUrl, "/jmx?qry=java.lang:type=Memory&callback=mycallback1")); - LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: "+result); + LOG.info("/jmx?qry=java.lang:type=Memory&callback=mycallback RESULT: " + result); assertReFind("^mycallback1\\(\\{", result); assertReFind("\\}\\);$", result); // negative test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); - LOG.info("/jmx RESULT: "+result); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::&callback=mycallback2")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback2\\(\\{", result); assertReFind("\"ERROR\"", result); assertReFind("\\}\\);$", result); // test to get an attribute of a mbean as JSONP - result = readOutput(new URL(baseUrl, - "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); - LOG.info("/jmx RESULT: "+result); + result = readOutput( + new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage&callback=mycallback3")); + LOG.info("/jmx RESULT: " + result); assertReFind("^mycallback3\\(\\{", result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -135,8 +135,7 @@ public void testGetPattern() throws Exception { assertReFind("\"NonHeapMemoryUsage\"\\s*:", result); assertNotFind("\"HeapMemoryUsage\"\\s*:", result); - result = - readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); + result = readOutput(new URL(baseUrl, "/jmx?get=java.lang:type=Memory::[^Non]*HeapMemoryUsage")); LOG.info("/jmx RESULT: " + result); assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result); assertReFind("\"committed\"\\s*:", result); @@ -160,8 +159,8 @@ public void testPatternMatching() throws Exception { @Test public void testDisallowedJSONPCallback() throws Exception { String callback = "function(){alert('bigproblems!')};foo"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) url.openConnection(); assertEquals(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, cnxn.getResponseCode()); } @@ -169,8 +168,8 @@ public void testDisallowedJSONPCallback() throws Exception { @Test public void testUnderscoresInJSONPCallback() throws Exception { String callback = "my_function"; - URL url = new URL( - baseUrl, "/jmx?qry=java.lang:type=Memory&callback="+URLEncoder.encode(callback, "UTF-8")); + URL url = new URL(baseUrl, + "/jmx?qry=java.lang:type=Memory&callback=" + URLEncoder.encode(callback, "UTF-8")); HttpURLConnection cnxn = (HttpURLConnection) url.openConnection(); assertEquals(HttpServletResponse.SC_OK, cnxn.getResponseCode()); } diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java index 39855ee86eff..ada47b8d38eb 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/lib/TestStaticUserWebFilter.java @@ -1,4 +1,4 @@ -/** +/* * 
Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestStaticUserWebFilter { @ClassRule @@ -47,8 +47,8 @@ public class TestStaticUserWebFilter { private FilterConfig mockConfig(String username) { FilterConfig mock = Mockito.mock(FilterConfig.class); - Mockito.doReturn(username).when(mock).getInitParameter( - ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); + Mockito.doReturn(username).when(mock) + .getInitParameter(ServerConfigurationKeys.HBASE_HTTP_STATIC_USER); return mock; } @@ -59,14 +59,13 @@ public void testFilter() throws Exception { suf.init(config); ArgumentCaptor wrapperArg = - ArgumentCaptor.forClass(HttpServletRequestWrapper.class); + ArgumentCaptor.forClass(HttpServletRequestWrapper.class); FilterChain chain = mock(FilterChain.class); - suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), - chain); + suf.doFilter(mock(HttpServletRequest.class), mock(ServletResponse.class), chain); - Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito.anyObject()); + Mockito.verify(chain).doFilter(wrapperArg.capture(), Mockito. anyObject()); HttpServletRequestWrapper wrapper = wrapperArg.getValue(); assertEquals("myuser", wrapper.getUserPrincipal().getName()); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java index d7889ea4a3b6..f2ed48665a67 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/log/TestLogLevel.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -67,7 +67,7 @@ public class TestLogLevel { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLogLevel.class); + HBaseClassTestRule.forClass(TestLogLevel.class); private static String keystoresDir; private static String sslConfDir; @@ -78,7 +78,7 @@ public class TestLogLevel { private static final String protectedPrefix = "protected"; private static final String protectedLogName = protectedPrefix + "." 
+ logName; private static final org.apache.logging.log4j.Logger log = - org.apache.logging.log4j.LogManager.getLogger(logName); + org.apache.logging.log4j.LogManager.getLogger(logName); private final static String PRINCIPAL = "loglevel.principal"; private final static String KEYTAB = "loglevel.keytab"; @@ -205,7 +205,7 @@ public void testCommandOptions() throws Exception { assertFalse( validateCommand(new String[] { "-setlevel", "foo.bar:8080", className, "DEBUG", "blah" })); assertFalse(validateCommand(new String[] { "-getlevel", "foo.bar:8080", className, "-setlevel", - "foo.bar:8080", className })); + "foo.bar:8080", className })); } /** @@ -236,24 +236,24 @@ private boolean validateCommand(String[] args) { */ private HttpServer createServer(String protocol, boolean isSpnego) throws Exception { HttpServer.Builder builder = new HttpServer.Builder().setName("..") - .addEndpoint(new URI(protocol + "://localhost:0")).setFindPort(true).setConf(serverConf); + .addEndpoint(new URI(protocol + "://localhost:0")).setFindPort(true).setConf(serverConf); if (isSpnego) { // Set up server Kerberos credentials. // Since the server may fall back to simple authentication, // use ACL to make sure the connection is Kerberos/SPNEGO authenticated. builder.setSecurityEnabled(true).setUsernameConfKey(PRINCIPAL).setKeytabConfKey(KEYTAB) - .setACL(new AccessControlList("client")); + .setACL(new AccessControlList("client")); } // if using HTTPS, configure keystore/truststore properties. if (protocol.equals(LogLevel.PROTOCOL_HTTPS)) { builder = builder.keyPassword(sslConf.get("ssl.server.keystore.keypassword")) - .keyStore(sslConf.get("ssl.server.keystore.location"), - sslConf.get("ssl.server.keystore.password"), - sslConf.get("ssl.server.keystore.type", "jks")) - .trustStore(sslConf.get("ssl.server.truststore.location"), - sslConf.get("ssl.server.truststore.password"), - sslConf.get("ssl.server.truststore.type", "jks")); + .keyStore(sslConf.get("ssl.server.keystore.location"), + sslConf.get("ssl.server.keystore.password"), + sslConf.get("ssl.server.keystore.type", "jks")) + .trustStore(sslConf.get("ssl.server.truststore.location"), + sslConf.get("ssl.server.truststore.password"), + sslConf.get("ssl.server.truststore.type", "jks")); } HttpServer server = builder.build(); @@ -262,17 +262,14 @@ private HttpServer createServer(String protocol, boolean isSpnego) throws Except } private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, - final boolean isSpnego) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, + final boolean isSpnego) throws Exception { + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, org.apache.logging.log4j.Level.DEBUG.toString()); } private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, - final boolean isSpnego, final String newLevel) throws Exception { - testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, - logName, - newLevel); + final boolean isSpnego, final String newLevel) throws Exception { + testDynamicLogLevel(bindProtocol, connectProtocol, isSpnego, logName, newLevel); } /** @@ -283,7 +280,7 @@ private void testDynamicLogLevel(final String bindProtocol, final String connect * @throws Exception if client can't accesss server. 
*/ private void testDynamicLogLevel(final String bindProtocol, final String connectProtocol, - final boolean isSpnego, final String loggerName, final String newLevel) throws Exception { + final boolean isSpnego, final String loggerName, final String newLevel) throws Exception { if (!LogLevel.isValidProtocol(bindProtocol)) { throw new Exception("Invalid server protocol " + bindProtocol); } @@ -315,7 +312,7 @@ private void testDynamicLogLevel(final String bindProtocol, final String connect String keytabFilePath = keyTabFile.getAbsolutePath(); UserGroupInformation clientUGI = - UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFilePath); + UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal, keytabFilePath); try { clientUGI.doAs((PrivilegedExceptionAction) () -> { // client command line @@ -351,7 +348,7 @@ private void getLevel(String protocol, String authority, String logName) throws * @throws Exception if unable to run or log level does not change as expected */ private void setLevel(String protocol, String authority, String logName, String newLevel) - throws Exception { + throws Exception { String[] setLevelArgs = { "-setlevel", authority, logName, newLevel, "-protocol", protocol }; CLI cli = new CLI(protocol.equalsIgnoreCase("https") ? sslConf : clientConf); cli.run(setLevelArgs); @@ -370,7 +367,8 @@ public void testSettingProtectedLogLevel() throws Exception { fail("Expected IO exception due to protected logger"); } catch (IOException e) { assertTrue(e.getMessage().contains("" + HttpServletResponse.SC_PRECONDITION_FAILED)); - assertTrue(e.getMessage().contains("Modification of logger " + protectedLogName + " is disallowed in configuration.")); + assertTrue(e.getMessage().contains( + "Modification of logger " + protectedLogName + " is disallowed in configuration.")); } } @@ -473,7 +471,7 @@ private static void exceptionShouldContains(String substr, Throwable throwable) } t = t.getCause(); } - throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + - StringUtils.stringifyException(throwable), throwable); + throw new AssertionError("Expected to find '" + substr + "' but got unexpected exception:" + + StringUtils.stringifyException(throwable), throwable); } } \ No newline at end of file diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java index ee900db62301..b683539cc7db 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/resource/JerseyResource.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,9 +34,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.util.ajax.JSON; /** - * A simple Jersey resource class TestHttpServer. - * The servlet simply puts the path and the op parameter in a map - * and return it in JSON format in the response. + * A simple Jersey resource class TestHttpServer. The servlet simply puts the path and the op + * parameter in a map and return it in JSON format in the response. 
*/ @Path("") public class JerseyResource { @@ -47,11 +46,9 @@ public class JerseyResource { @GET @Path("{" + PATH + ":.*}") - @Produces({MediaType.APPLICATION_JSON}) - public Response get( - @PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, - @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op - ) throws IOException { + @Produces({ MediaType.APPLICATION_JSON }) + public Response get(@PathParam(PATH) @DefaultValue("UNKNOWN_" + PATH) final String path, + @QueryParam(OP) @DefaultValue("UNKNOWN_" + OP) final String op) throws IOException { LOG.info("get: " + PATH + "=" + path + ", " + OP + "=" + op); final Map m = new TreeMap<>(); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java index c201c7a52328..e9dfa2570e4c 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/http/ssl/KeyStoreTestUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.http.ssl; import java.io.File; @@ -63,7 +62,6 @@ public static String getClasspathDir(Class klass) throws Exception { /** * Create a self-signed X.509 Certificate. - * * @param dn the X.509 Distinguished Name, eg "CN=Test, L=London, C=GB" * @param pair the KeyPair * @param days how many days from now the Certificate is valid for @@ -72,14 +70,13 @@ public static String getClasspathDir(Class klass) throws Exception { */ public static X509Certificate generateCertificate(String dn, KeyPair pair, int days, String algorithm) throws CertificateEncodingException, InvalidKeyException, - IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, - SignatureException { + IllegalStateException, NoSuchProviderException, NoSuchAlgorithmException, SignatureException { Date from = new Date(); Date to = new Date(from.getTime() + days * 86400000L); BigInteger sn = new BigInteger(64, new SecureRandom()); KeyPair keyPair = pair; X509V1CertificateGenerator certGen = new X509V1CertificateGenerator(); - X500Principal dnName = new X500Principal(dn); + X500Principal dnName = new X500Principal(dn); certGen.setSerialNumber(sn); certGen.setIssuerDN(dnName); @@ -92,28 +89,25 @@ public static X509Certificate generateCertificate(String dn, KeyPair pair, int d return cert; } - public static KeyPair generateKeyPair(String algorithm) - throws NoSuchAlgorithmException { + public static KeyPair generateKeyPair(String algorithm) throws NoSuchAlgorithmException { KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); keyGen.initialize(1024); return keyGen.genKeyPair(); } - private static KeyStore createEmptyKeyStore() - throws GeneralSecurityException, IOException { + private static KeyStore createEmptyKeyStore() throws GeneralSecurityException, IOException { return createEmptyKeyStore("jks"); } private static KeyStore createEmptyKeyStore(String keyStoreType) - throws GeneralSecurityException, IOException { + throws GeneralSecurityException, IOException { KeyStore ks = KeyStore.getInstance(keyStoreType); ks.load(null, null); // initialize return ks; } - private static void saveKeyStore(KeyStore ks, String 
filename, - String password) - throws GeneralSecurityException, IOException { + private static void saveKeyStore(KeyStore ks, String filename, String password) + throws GeneralSecurityException, IOException { FileOutputStream out = new FileOutputStream(filename); try { ks.store(out, password.toCharArray()); @@ -123,10 +117,9 @@ private static void saveKeyStore(KeyStore ks, String filename, } /** - * Creates a keystore with a single key and saves it to a file. - * This method will use the same password for the keystore and for the key. - * This method will always generate a keystore file in JKS format. - * + * Creates a keystore with a single key and saves it to a file. This method will use the same + * password for the keystore and for the key. This method will always generate a keystore file in + * JKS format. * @param filename String file to save * @param password String store password to set on keystore * @param alias String alias to use for the key @@ -135,17 +128,14 @@ private static void saveKeyStore(KeyStore ks, String filename, * @throws GeneralSecurityException for any error with the security APIs * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String alias, Key privateKey, + Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, password, alias, privateKey, cert); } /** - * Creates a keystore with a single key and saves it to a file. - * This method will always generate a keystore file in JKS format. - * + * Creates a keystore with a single key and saves it to a file. This method will always generate a + * keystore file in JKS format. * @param filename String file to save * @param password String store password to set on keystore * @param keyPassword String key password to set on key @@ -155,17 +145,13 @@ public static void createKeyStore(String filename, * @throws GeneralSecurityException for any error with the security APIs * @throws IOException if there is an I/O error saving the file */ - public static void createKeyStore(String filename, - String password, String keyPassword, String alias, - Key privateKey, Certificate cert) - throws GeneralSecurityException, IOException { + public static void createKeyStore(String filename, String password, String keyPassword, + String alias, Key privateKey, Certificate cert) throws GeneralSecurityException, IOException { createKeyStore(filename, password, keyPassword, alias, privateKey, cert, "JKS"); } - /** * Creates a keystore with a single key and saves it to a file. 
- * * @param filename String file to save * @param password String store password to set on keystore * @param keyPassword String key password to set on key @@ -177,19 +163,16 @@ public static void createKeyStore(String filename, * @throws IOException if there is an I/O error saving the file */ public static void createKeyStore(String filename, String password, String keyPassword, - String alias, Key privateKey, Certificate cert, - String keystoreType) - throws GeneralSecurityException, IOException { + String alias, Key privateKey, Certificate cert, String keystoreType) + throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(keystoreType); - ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), - new Certificate[]{cert}); + ks.setKeyEntry(alias, privateKey, keyPassword.toCharArray(), new Certificate[] { cert }); saveKeyStore(ks, filename, password); } /** - * Creates a truststore with a single certificate and saves it to a file. - * This method uses the default JKS truststore type. - * + * Creates a truststore with a single certificate and saves it to a file. This method uses the + * default JKS truststore type. * @param filename String file to save * @param password String store password to set on truststore * @param alias String alias to use for the certificate @@ -197,16 +180,13 @@ public static void createKeyStore(String filename, String password, String keyPa * @throws GeneralSecurityException for any error with the security APIs * @throws IOException if there is an I/O error saving the file */ - public static void createTrustStore(String filename, - String password, String alias, - Certificate cert) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, String alias, + Certificate cert) throws GeneralSecurityException, IOException { createTrustStore(filename, password, alias, cert, "JKS"); } /** * Creates a truststore with a single certificate and saves it to a file. 
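Aside: the low-level helpers above (generateKeyPair, generateCertificate, createKeyStore, createTrustStore) compose roughly as in the sketch below. The wrapper class, file names and passwords are illustrative assumptions, not code from this module; only the KeyStoreTestUtil signatures are taken from the patch.

import java.security.KeyPair;
import java.security.cert.X509Certificate;
import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;

public class KeyStoreSketch { // hypothetical wrapper, for illustration only
  public static void main(String[] args) throws Exception {
    // Generate a key pair and a self-signed certificate (same parameters the tests use).
    KeyPair pair = KeyStoreTestUtil.generateKeyPair("RSA");
    X509Certificate cert =
      KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", pair, 30, "SHA1withRSA");
    // Private key plus its certificate go into a JKS keystore for the server side.
    // Paths assume an existing target/ directory (illustrative).
    KeyStoreTestUtil.createKeyStore("target/serverKS.jks", "serverP", "server",
      pair.getPrivate(), cert);
    // Only the certificate goes into the truststore a client would point at.
    KeyStoreTestUtil.createTrustStore("target/trustKS.jks", "trustP", "server", cert);
  }
}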
- * * @param filename String file to save * @param password String store password to set on truststore * @param alias String alias to use for the certificate @@ -216,16 +196,14 @@ public static void createTrustStore(String filename, * @throws IOException if there is an I/O error saving the file */ public static void createTrustStore(String filename, String password, String alias, - Certificate cert, String trustStoreType) - throws GeneralSecurityException, IOException { + Certificate cert, String trustStoreType) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(trustStoreType); ks.setCertificateEntry(alias, cert); saveKeyStore(ks, filename, password); } - public static void createTrustStore( - String filename, String password, Map certs) - throws GeneralSecurityException, IOException { + public static void createTrustStore(String filename, String password, + Map certs) throws GeneralSecurityException, IOException { KeyStore ks = createEmptyKeyStore(); for (Map.Entry cert : certs.entrySet()) { ks.setCertificateEntry(cert.getKey(), cert.getValue()); @@ -233,46 +211,41 @@ public static void createTrustStore( saveKeyStore(ks, filename, password); } - public static void cleanupSSLConfig(Configuration conf) - throws Exception { + public static void cleanupSSLConfig(Configuration conf) throws Exception { File f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_LOCATION_TPL_KEY))); f.delete(); f = new File(conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.SERVER, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY))); f.delete(); - String clientKeyStore = conf.get(FileBasedKeyStoresFactory - .resolvePropertyName(SSLFactory.Mode.CLIENT, - FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); + String clientKeyStore = + conf.get(FileBasedKeyStoresFactory.resolvePropertyName(SSLFactory.Mode.CLIENT, + FileBasedKeyStoresFactory.SSL_KEYSTORE_LOCATION_TPL_KEY)); if (clientKeyStore != null) { f = new File(clientKeyStore); f.delete(); } - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_CLIENT_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_CLIENT_CONF_KEY)); f.delete(); - f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + conf - .get(SSLFactory.SSL_SERVER_CONF_KEY)); + f = new File(KeyStoreTestUtil.getClasspathDir(KeyStoreTestUtil.class) + "/" + + conf.get(SSLFactory.SSL_SERVER_CONF_KEY)); f.delete(); } /** - * Performs complete setup of SSL configuration in preparation for testing an - * SSLFactory. This includes keys, certs, keystores, truststores, the server - * SSL configuration file, the client SSL configuration file, and the master - * configuration file read by the SSLFactory. - * + * Performs complete setup of SSL configuration in preparation for testing an SSLFactory. This + * includes keys, certs, keystores, truststores, the server SSL configuration file, the client SSL + * configuration file, and the master configuration file read by the SSLFactory. 
* @param keystoresDir String directory to save keystores * @param sslConfDir String directory to save SSL configuration files - * @param conf Configuration master configuration to be used by an SSLFactory, - * which will be mutated by this method - * @param useClientCert boolean true to make the client present a cert in the - * SSL handshake + * @param conf Configuration master configuration to be used by an SSLFactory, which will be + * mutated by this method + * @param useClientCert boolean true to make the client present a cert in the SSL handshake */ - public static void setupSSLConfig(String keystoresDir, String sslConfDir, - Configuration conf, boolean useClientCert) - throws Exception { + public static void setupSSLConfig(String keystoresDir, String sslConfDir, Configuration conf, + boolean useClientCert) throws Exception { String clientKS = keystoresDir + "/clientKS.jks"; String clientPassword = "clientP"; String serverKS = keystoresDir + "/serverKS.jks"; @@ -280,39 +253,33 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, String trustKS = keystoresDir + "/trustKS.jks"; String trustPassword = "trustP"; - File sslClientConfFile = new File( - sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + HBaseCommonTestingUtility - .getRandomUUID() + ".xml"); - File sslServerConfFile = new File( - sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + HBaseCommonTestingUtility - .getRandomUUID() + ".xml"); + File sslClientConfFile = new File(sslConfDir + "/ssl-client-" + System.nanoTime() + "-" + + HBaseCommonTestingUtility.getRandomUUID() + ".xml"); + File sslServerConfFile = new File(sslConfDir + "/ssl-server-" + System.nanoTime() + "-" + + HBaseCommonTestingUtility.getRandomUUID() + ".xml"); Map certs = new HashMap<>(); if (useClientCert) { KeyPair cKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate cCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", - cKP.getPrivate(), cCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=client", cKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(clientKS, clientPassword, "client", cKP.getPrivate(), cCert); certs.put("client", cCert); } KeyPair sKP = KeyStoreTestUtil.generateKeyPair("RSA"); X509Certificate sCert = - KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, - "SHA1withRSA"); - KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", - sKP.getPrivate(), sCert); + KeyStoreTestUtil.generateCertificate("CN=localhost, O=server", sKP, 30, "SHA1withRSA"); + KeyStoreTestUtil.createKeyStore(serverKS, serverPassword, "server", sKP.getPrivate(), sCert); certs.put("server", sCert); KeyStoreTestUtil.createTrustStore(trustKS, trustPassword, certs); - Configuration clientSSLConf = createClientSSLConfig(clientKS, clientPassword, - clientPassword, trustKS); - Configuration serverSSLConf = createServerSSLConfig(serverKS, serverPassword, - serverPassword, trustKS); + Configuration clientSSLConf = + createClientSSLConfig(clientKS, clientPassword, clientPassword, trustKS); + Configuration serverSSLConf = + createServerSSLConfig(serverKS, serverPassword, serverPassword, trustKS); saveConfig(sslClientConfFile, clientSSLConf); saveConfig(sslServerConfFile, serverSSLConf); @@ -322,60 +289,50 @@ public static void setupSSLConfig(String keystoresDir, String sslConfDir, conf.set(SSLFactory.SSL_SERVER_CONF_KEY, sslServerConfFile.getName()); 
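For context, setupSSLConfig (whose body continues just below) is what tests normally call instead of the individual helpers; a minimal consumption sketch follows. The scratch directory, the wrapper class, and passing that class to getClasspathDir are assumptions for illustration, not taken from this module.

import java.io.File;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.http.ssl.KeyStoreTestUtil;

public class SSLConfigSketch { // hypothetical wrapper, for illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Scratch location for generated keystores; ensure it exists before keystores are written.
    String keystoresDir = new File("target/test-keystores").getAbsolutePath();
    new File(keystoresDir).mkdirs();
    // Directory on the classpath where ssl-client-*.xml / ssl-server-*.xml get written.
    String sslConfDir = KeyStoreTestUtil.getClasspathDir(SSLConfigSketch.class);
    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
    try {
      // conf now references the generated SSL client/server config resources and can be
      // handed to an HTTPS-enabled HttpServer under test.
    } finally {
      KeyStoreTestUtil.cleanupSSLConfig(conf);
    }
  }
}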
conf.set("dfs.https.server.keystore.resource", sslServerConfFile.getName()); - conf.setBoolean(SSLFactory.SSL_REQUIRE_CLIENT_CERT_KEY, useClientCert); } /** * Creates SSL configuration for a client. - * * @param clientKS String client keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password * @param trustKS String truststore file * @return Configuration for client SSL */ - public static Configuration createClientSSLConfig(String clientKS, - String password, String keyPassword, String trustKS) { - Configuration clientSSLConf = createSSLConfig(SSLFactory.Mode.CLIENT, - clientKS, password, keyPassword, trustKS); + public static Configuration createClientSSLConfig(String clientKS, String password, + String keyPassword, String trustKS) { + Configuration clientSSLConf = + createSSLConfig(SSLFactory.Mode.CLIENT, clientKS, password, keyPassword, trustKS); return clientSSLConf; } /** * Creates SSL configuration for a server. - * * @param serverKS String server keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password * @param trustKS String truststore file * @return Configuration for server SSL */ - public static Configuration createServerSSLConfig(String serverKS, - String password, String keyPassword, String trustKS) throws IOException { - Configuration serverSSLConf = createSSLConfig(SSLFactory.Mode.SERVER, - serverKS, password, keyPassword, trustKS); + public static Configuration createServerSSLConfig(String serverKS, String password, + String keyPassword, String trustKS) throws IOException { + Configuration serverSSLConf = + createSSLConfig(SSLFactory.Mode.SERVER, serverKS, password, keyPassword, trustKS); return serverSSLConf; } /** * Creates SSL configuration. 
- * * @param mode SSLFactory.Mode mode to configure * @param keystore String keystore file - * @param password String store password, or null to avoid setting store - * password - * @param keyPassword String key password, or null to avoid setting key - * password + * @param password String store password, or null to avoid setting store password + * @param keyPassword String key password, or null to avoid setting key password * @param trustKS String truststore file * @return Configuration for SSL */ - private static Configuration createSSLConfig(SSLFactory.Mode mode, - String keystore, String password, String keyPassword, String trustKS) { + private static Configuration createSSLConfig(SSLFactory.Mode mode, String keystore, + String password, String keyPassword, String trustKS) { String trustPassword = "trustP"; Configuration sslConf = new Configuration(false); @@ -389,8 +346,7 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, } if (keyPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), - keyPassword); + FileBasedKeyStoresFactory.SSL_KEYSTORE_KEYPASSWORD_TPL_KEY), keyPassword); } if (trustKS != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, @@ -398,8 +354,7 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, } if (trustPassword != null) { sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, - FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), - trustPassword); + FileBasedKeyStoresFactory.SSL_TRUSTSTORE_PASSWORD_TPL_KEY), trustPassword); } sslConf.set(FileBasedKeyStoresFactory.resolvePropertyName(mode, FileBasedKeyStoresFactory.SSL_TRUSTSTORE_RELOAD_INTERVAL_TPL_KEY), "1000"); @@ -409,13 +364,11 @@ private static Configuration createSSLConfig(SSLFactory.Mode mode, /** * Saves configuration to a file. - * * @param file File to save * @param conf Configuration contents to write to file * @throws IOException if there is an I/O error saving the file */ - public static void saveConfig(File file, Configuration conf) - throws IOException { + public static void saveConfig(File file, Configuration conf) throws IOException { Writer writer = new FileWriter(file); try { conf.writeXml(writer); diff --git a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java index c277cd068da3..81c5eec8a559 100644 --- a/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java +++ b/hbase-http/src/test/java/org/apache/hadoop/hbase/util/TestJSONBean.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,6 +22,7 @@ import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Type; @@ -39,17 +39,18 @@ import org.junit.ClassRule; import org.junit.Test; import org.junit.experimental.categories.Category; + import org.apache.hbase.thirdparty.com.google.common.reflect.TypeToken; import org.apache.hbase.thirdparty.com.google.gson.Gson; /** * Test {@link JSONBean}. 
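The JSONBean writer exercised in the test below can also be pointed at the live platform MBeanServer instead of a mock, using the same try-with-resources pattern; a minimal sketch, with the wrapper class name assumed for illustration.

import java.io.PrintWriter;
import java.io.StringWriter;
import java.lang.management.ManagementFactory;
import org.apache.hadoop.hbase.util.JSONBean;

public class JSONBeanSketch { // hypothetical wrapper, for illustration only
  public static void main(String[] args) throws Exception {
    JSONBean bean = new JSONBean();
    StringWriter out = new StringWriter();
    try (PrintWriter pw = new PrintWriter(out);
      JSONBean.Writer json = bean.open(pw)) {
      // Dump every MBean of the running JVM as JSON (query/attribute filters left null,
      // descriptions disabled), mirroring the call made in testJSONBeanValueTypes.
      json.write(ManagementFactory.getPlatformMBeanServer(), null, null, false);
    }
    System.out.println(out);
  }
}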
*/ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestJSONBean { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestJSONBean.class); + HBaseClassTestRule.forClass(TestJSONBean.class); private MBeanServer getMockMBeanServer() throws Exception { MBeanServer mbeanServer = mock(MBeanServer.class); @@ -58,23 +59,17 @@ private MBeanServer getMockMBeanServer() throws Exception { when(mbeanServer.queryNames(any(), any())).thenReturn(names); MBeanInfo mbeanInfo = mock(MBeanInfo.class); when(mbeanInfo.getClassName()).thenReturn("testClassName"); - String[] attributeNames = new String[] {"intAttr", "nanAttr", "infinityAttr", - "strAttr", "boolAttr", "test:Attr"}; + String[] attributeNames = + new String[] { "intAttr", "nanAttr", "infinityAttr", "strAttr", "boolAttr", "test:Attr" }; MBeanAttributeInfo[] attributeInfos = new MBeanAttributeInfo[attributeNames.length]; for (int i = 0; i < attributeInfos.length; i++) { - attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], - null, - null, - true, - false, - false); + attributeInfos[i] = new MBeanAttributeInfo(attributeNames[i], null, null, true, false, false); } when(mbeanInfo.getAttributes()).thenReturn(attributeInfos); when(mbeanServer.getMBeanInfo(any())).thenReturn(mbeanInfo); when(mbeanServer.getAttribute(any(), eq("intAttr"))).thenReturn(3); when(mbeanServer.getAttribute(any(), eq("nanAttr"))).thenReturn(Double.NaN); - when(mbeanServer.getAttribute(any(), eq("infinityAttr"))). - thenReturn(Double.POSITIVE_INFINITY); + when(mbeanServer.getAttribute(any(), eq("infinityAttr"))).thenReturn(Double.POSITIVE_INFINITY); when(mbeanServer.getAttribute(any(), eq("strAttr"))).thenReturn("aString"); when(mbeanServer.getAttribute(any(), eq("boolAttr"))).thenReturn(true); when(mbeanServer.getAttribute(any(), eq("test:Attr"))).thenReturn("aString"); @@ -105,14 +100,14 @@ private String getExpectedJSON() { public void testJSONBeanValueTypes() throws Exception { JSONBean bean = new JSONBean(); StringWriter stringWriter = new StringWriter(); - try ( - PrintWriter printWriter = new PrintWriter(stringWriter); - JSONBean.Writer jsonWriter = bean.open(printWriter)) { + try (PrintWriter printWriter = new PrintWriter(stringWriter); + JSONBean.Writer jsonWriter = bean.open(printWriter)) { jsonWriter.write(getMockMBeanServer(), null, null, false); } final Gson gson = GsonUtil.createGson().create(); - Type typeOfHashMap = new TypeToken>() {}.getType(); + Type typeOfHashMap = new TypeToken>() { + }.getType(); Map expectedJson = gson.fromJson(getExpectedJSON(), typeOfHashMap); Map actualJson = gson.fromJson(stringWriter.toString(), typeOfHashMap); assertEquals(expectedJson, actualJson); diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml index f1ce681c688e..a06a41c833f5 100644 --- a/hbase-it/pom.xml +++ b/hbase-it/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -40,105 +40,6 @@ - - - - - ../hbase-server/src/test/resources - - META-INF/NOTICE - META-INF/LICENSE - - - - src/test/resources - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-failsafe-plugin - ${surefire.version} - - - org.apache.maven.surefire - surefire-junit4 - ${surefire.version} - - - - - ${integrationtest.include} - - - ${unittest.include} - **/*$* - - ${test.output.tofile} - - 
${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib - ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib - 4 - - false - false - - - - integration-test - integration-test - - integration-test - - - - verify - verify - - verify - - - - - - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - - false - always - - 1800 - -enableassertions -Xmx${failsafe.Xmx} - -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled - -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal - - - - net.revelc.code - warbucks-maven-plugin - - - - + org.apache.hbase hbase-annotations @@ -217,8 +118,8 @@ which pulls in the below. It messes up this build at assembly time. See HBASE-22029--> - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -288,8 +189,8 @@ test - javax.servlet-api javax.servlet + javax.servlet-api test @@ -314,6 +215,129 @@ + + + + + ../hbase-server/src/test/resources + + META-INF/NOTICE + META-INF/LICENSE + + + + src/test/resources + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-failsafe-plugin + ${surefire.version} + + + ${integrationtest.include} + + + ${unittest.include} + **/*$* + + ${test.output.tofile} + + ${env.LD_LIBRARY_PATH}:${project.build.directory}/nativelib + ${env.DYLD_LIBRARY_PATH}:${project.build.directory}/nativelib + 4 + + false + false + + + + org.apache.maven.surefire + surefire-junit4 + ${surefire.version} + + + + + integration-test + + integration-test + + integration-test + + + verify + + verify + + verify + + + + + + + + + + org.apache.maven.plugins + maven-failsafe-plugin + + false + always + + 1800 + -enableassertions -Xmx${failsafe.Xmx} + -Djava.security.egd=file:/dev/./urandom -XX:+CMSClassUnloadingEnabled + -verbose:gc -XX:+PrintCommandLineFlags -XX:+PrintFlagsFinal + + + + net.revelc.code + warbucks-maven-plugin + + + + + + + + org.apache.maven.plugins + maven-surefire-report-plugin + ${surefire.version} + + + integration-tests + + report-only + + + failsafe-report + + ${project.build.directory}/failsafe-reports + + + + + + + + @@ -345,8 +369,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -388,10 +413,10 @@ create-mrapp-generated-classpath - generate-test-resources build-classpath + generate-test-resources 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -33,35 +31,6 @@ Apache HBase - Logging Logging Support for HBase - - - - src/test/resources - - log4j2.properties - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase @@ -109,4 +78,33 @@ test + + + + + src/test/resources + + log4j2.properties + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java index b0711d7e8f1a..d1cf2bf7cc46 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/InternalLog4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ private InternalLog4jUtils() { static void setLogLevel(String loggerName, String levelName) { org.apache.logging.log4j.Level level = - org.apache.logging.log4j.Level.toLevel(levelName.toUpperCase()); + org.apache.logging.log4j.Level.toLevel(levelName.toUpperCase()); if (!level.toString().equalsIgnoreCase(levelName)) { throw new IllegalArgumentException("Unsupported log level " + levelName); } @@ -47,7 +47,7 @@ static void setLogLevel(String loggerName, String levelName) { static String getEffectiveLevel(String loggerName) { org.apache.logging.log4j.Logger logger = - org.apache.logging.log4j.LogManager.getLogger(loggerName); + org.apache.logging.log4j.LogManager.getLogger(loggerName); return logger.getLevel().name(); } @@ -61,27 +61,28 @@ static Set getActiveLogFiles() throws IOException { for (org.apache.logging.log4j.core.Appender appender : coreLogger.getAppenders().values()) { if (appender instanceof org.apache.logging.log4j.core.appender.FileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.FileAppender) appender).getFileName(); + ((org.apache.logging.log4j.core.appender.FileAppender) appender).getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.AbstractFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.AbstractFileAppender) appender).getFileName(); + ((org.apache.logging.log4j.core.appender.AbstractFileAppender) appender) + .getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.RollingFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.RollingFileAppender) appender).getFileName(); + ((org.apache.logging.log4j.core.appender.RollingFileAppender) appender).getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.RandomAccessFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.RandomAccessFileAppender) appender) - .getFileName(); + ((org.apache.logging.log4j.core.appender.RandomAccessFileAppender) appender) + .getFileName(); ret.add(new File(fileName)); } else if (appender instanceof org.apache.logging.log4j.core.appender.MemoryMappedFileAppender) { String fileName = - ((org.apache.logging.log4j.core.appender.MemoryMappedFileAppender) appender) - .getFileName(); + ((org.apache.logging.log4j.core.appender.MemoryMappedFileAppender) appender) + .getFileName(); ret.add(new File(fileName)); } } diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java index e7b5fdd39356..2909b4191383 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/JulToSlf4jInitializer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -37,6 +37,6 @@ public class JulToSlf4jInitializer { public JulToSlf4jInitializer() throws IOException { LogManager.getLogManager() - .readConfiguration(new ByteArrayInputStream(PROPERTIES.getBytes(StandardCharsets.UTF_8))); + .readConfiguration(new ByteArrayInputStream(PROPERTIES.getBytes(StandardCharsets.UTF_8))); } } diff --git a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java index 9b3459194ab6..36c054b2e6d1 100644 --- a/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java +++ b/hbase-logging/src/main/java/org/apache/hadoop/hbase/logging/Log4jUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ public final class Log4jUtils { private static final String INTERNAL_UTILS_CLASS_NAME = - "org.apache.hadoop.hbase.logging.InternalLog4jUtils"; + "org.apache.hadoop.hbase.logging.InternalLog4jUtils"; private Log4jUtils() { } diff --git a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java index 7b3876ce0833..1c3a4bae01aa 100644 --- a/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java +++ b/hbase-logging/src/test/java/org/apache/log4j/FileAppender.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.log4j; import java.io.BufferedWriter; @@ -74,7 +73,7 @@ public FileAppender() { * write to the output file. */ public FileAppender(Layout layout, String fileName, boolean append, boolean bufferedIO, - int bufferSize) throws IOException { + int bufferSize) throws IOException { this.layout = layout; this.setFile(fileName, append, bufferedIO, bufferSize); } @@ -225,7 +224,7 @@ public void setBufferSize(int bufferSize) { * @param append If true will append to fileName. Otherwise will truncate fileName. */ public synchronized void setFile(String fileName, boolean append, boolean bufferedIO, - int bufferSize) throws IOException { + int bufferSize) throws IOException { // It does not make sense to have immediate flush and bufferedIO. if (bufferedIO) { diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml index 9d7a6fc4c080..31b68a6cfd5a 100644 --- a/hbase-mapreduce/pom.xml +++ b/hbase-mapreduce/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-mapreduce Apache HBase - MapReduce - - This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which + This module contains implementations of InputFormat, OutputFormat, Mapper, Reducer, etc which are needed for running MR jobs on tables, WALs, HFiles and other HBase specific constructs. 
It also contains a bunch of tools: RowCounter, ImportTsv, Import, Export, CompactionTool, - ExportSnapshot, WALPlayer, etc - - - - - - - maven-assembly-plugin - - true - - - - org.apache.maven.plugins - maven-jar-plugin - - - - - org/apache/hadoop/hbase/mapreduce/Driver - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - + ExportSnapshot, WALPlayer, etc @@ -326,6 +289,40 @@ test + + + + + + maven-assembly-plugin + + true + + + + org.apache.maven.plugins + maven-jar-plugin + + + + + org/apache/hadoop/hbase/mapreduce/Driver + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + @@ -345,8 +342,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -377,8 +375,7 @@ lifecycle-mapping - - + diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java index b1f71f057f28..cd442b46d5c1 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/Driver.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,8 +23,7 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Driver for hbase mapreduce jobs. Select which to run by passing name of job - * to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable @@ -44,7 +42,7 @@ static void setProgramDriver(ProgramDriver pgd0) { */ public static void main(String[] args) throws Throwable { pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table"); - ProgramDriver.class.getMethod("driver", new Class[] { String[].class }) - .invoke(pgd, new Object[] { args }); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java index 594816fcf503..04b627718e6f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/GroupingTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.client.Result; @@ -31,42 +28,37 @@ import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; - +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record */ @InterfaceAudience.Public -public class GroupingTableMap -extends MapReduceBase -implements TableMap { +public class GroupingTableMap extends MapReduceBase + implements TableMap { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; - protected byte [][] columns; + protected byte[][] columns; /** - * Use this before submitting a TableMap job. It will appropriately set up the - * JobConf. - * + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. * @param table table to be processed * @param columns space separated list of columns to fetch - * @param groupColumns space separated list of columns used to form the key - * used in collect + * @param groupColumns space separated list of columns used to form the key used in collect * @param mapper map class * @param job job configuration object */ @SuppressWarnings("unchecked") public static void initJob(String table, String columns, String groupColumns, - Class mapper, JobConf job) { + Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, + Result.class, job); job.set(GROUP_COLUMNS, groupColumns); } @@ -75,16 +67,14 @@ public void configure(JobConf job) { super.configure(job); String[] cols = job.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } /** - * Extract the grouping columns from value to construct a new key. - * - * Pass the new key and value to reduce. - * If any of the grouping columns are not found in the value, the record is skipped. + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. * @param key * @param value * @param output @@ -92,22 +82,19 @@ public void configure(JobConf job) { * @throws IOException */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) + throws IOException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); output.collect(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. 
- * - * Override this method if you want to deal with nulls differently. - * + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. Override this method if you want to deal with nulls differently. * @param r * @return array of byte values */ @@ -116,9 +103,9 @@ protected byte[][] extractKeyValues(Result r) { ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -126,7 +113,7 @@ protected byte[][] extractKeyValues(Result r) { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -134,19 +121,18 @@ protected byte[][] extractKeyValues(Result r) { } /** - * Create a key by concatenating multiple column values. - * Override this function in order to produce different types of keys. - * + * Create a key by concatenating multiple column values. Override this function in order to + * produce different types of keys. * @param vals * @return key generated by concatenating multiple column values */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java index 78062588e828..91b4d0713434 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,9 +20,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; @@ -31,18 +27,18 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * + * This is used to partition the output keys into groups of keys. 
Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. * @param * @param */ @InterfaceAudience.Public -public class HRegionPartitioner -implements Partitioner { +public class HRegionPartitioner implements Partitioner { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); // Connection and locator are not cleaned up; they just die when partitioner is done. private Connection connection; @@ -70,7 +66,7 @@ public void configure(JobConf job) { public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -80,12 +76,11 @@ public int getPartition(ImmutableBytesWritable key, V2 value, int numPartitions) } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java index c97bcc025230..fd3eb2a4d153 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,22 +18,20 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to reduce */ @InterfaceAudience.Public -public class IdentityTableMap -extends MapReduceBase -implements TableMap { +public class IdentityTableMap extends MapReduceBase + implements TableMap { /** constructor */ public IdentityTableMap() { @@ -42,19 +39,16 @@ public IdentityTableMap() { } /** - * Use this before submitting a TableMap job. It will - * appropriately set up the JobConf. - * + * Use this before submitting a TableMap job. It will appropriately set up the JobConf. 
* @param table table name * @param columns columns to scan * @param mapper mapper class * @param job job configuration */ @SuppressWarnings("unchecked") - public static void initJob(String table, String columns, - Class mapper, JobConf job) { - TableMapReduceUtil.initTableMapJob(table, columns, mapper, - ImmutableBytesWritable.class, + public static void initJob(String table, String columns, Class mapper, + JobConf job) { + TableMapReduceUtil.initTableMapJob(table, columns, mapper, ImmutableBytesWritable.class, Result.class, job); } @@ -67,8 +61,8 @@ public static void initJob(String table, String columns, * @throws IOException */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) + throws IOException { // convert output.collect(key, value); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java index ba1df4c3a835..94c6d248e437 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/IdentityTableReduce.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,26 +19,23 @@ import java.io.IOException; import java.util.Iterator; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapred.MapReduceBase; import org.apache.hadoop.mapred.OutputCollector; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Write to table each key, record pair */ @InterfaceAudience.Public -public class IdentityTableReduce -extends MapReduceBase -implements TableReduce { +public class IdentityTableReduce extends MapReduceBase + implements TableReduce { @SuppressWarnings("unused") - private static final Logger LOG = - LoggerFactory.getLogger(IdentityTableReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReduce.class.getName()); /** * No aggregation, output pairs of (key, record) @@ -50,11 +46,9 @@ public class IdentityTableReduce * @throws IOException */ public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { - while(values.hasNext()) { + while (values.hasNext()) { output.collect(key, values.next()); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java index 7902d1a3b4c3..a415c5dbe663 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,13 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapred; import edu.umd.cs.findbugs.annotations.SuppressWarnings; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -32,33 +34,25 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; - -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.yetus.audience.InterfaceAudience; /** * MultiTableSnapshotInputFormat generalizes - * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * and thus has the same performance advantages; see - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * for more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan + * {@link org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat} allowing a MapReduce job to run + * over one or more table snapshots, with one or more scans configured for each. Internally, the + * input format delegates to {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} and + * thus has the same performance advantages; see + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more details. Usage is + * similar to TableSnapshotInputFormat, with the following exception: + * initMultiTableSnapshotMapperJob takes in a map from snapshot name to a collection of scans. For + * each snapshot in the map, each corresponding scan will be applied; the overall dataset for the + * job is defined by the concatenation of the regions and tables included in each snapshot/scan * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, - * Class, Class, Class, JobConf, boolean, Path)} + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob(Map, Class, Class, Class, JobConf, boolean, Path)} * can be used to configure the job. - *

    {@code
    + * 
    + * 
    + * {@code
      * Job job = new Job(conf);
      * Map<String, Collection<Scan>> snapshotScans = ImmutableMap.of(
      *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
    @@ -70,15 +64,12 @@
      *      MyMapOutputValueWritable.class, job, true, restoreDir);
      * }
      * 
    + * * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on - * permissioning; the - * same caveats apply here. - * + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} for more notes on + * permissioning; the same caveats apply here. * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ @@ -111,11 +102,9 @@ public RecordReader getRecordReader(InputSplit s @SuppressWarnings("checkstyle:linelength") /** * Configure conf to read from snapshotScans, with snapshots restored to a subdirectory of - * restoreDir. - * Sets: + * restoreDir. Sets: * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#RESTORE_DIRS_KEY}, * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormatImpl#SNAPSHOT_TO_SCANS_KEY} - * * @param conf * @param snapshotScans * @param restoreDir diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java index 75b221c5526b..4d1206e9b690 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,11 +30,11 @@ import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; /** - * A job with a map to count rows. - * Map outputs table rows IF the input row has columns that have content. - * Uses a org.apache.hadoop.mapred.lib.IdentityReducer + * A job with a map to count rows. Map outputs table rows IF the input row has columns that have + * content. Uses a org.apache.hadoop.mapred.lib.IdentityReducer */ @InterfaceAudience.Public public class RowCounter extends Configured implements Tool { @@ -47,16 +44,16 @@ public class RowCounter extends Configured implements Tool { /** * Mapper that runs the count. 
*/ - static class RowCounterMapper - implements TableMap { - private static enum Counters {ROWS} + static class RowCounterMapper implements TableMap { + private static enum Counters { + ROWS + } public void map(ImmutableBytesWritable row, Result values, - OutputCollector output, - Reporter reporter) - throws IOException { - // Count every row containing data, whether it's in qualifiers or values - reporter.incrCounter(Counters.ROWS, 1); + OutputCollector output, Reporter reporter) + throws IOException { + // Count every row containing data, whether it's in qualifiers or values + reporter.incrCounter(Counters.ROWS, 1); } public void configure(JobConf jc) { @@ -86,8 +83,8 @@ public JobConf createSubmittableJob(String[] args) throws IOException { sb.append(args[i]); } // Second argument is the table name. - TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, c); + TableMapReduceUtil.initTableMapJob(args[1], sb.toString(), RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, c); c.setNumReduceTasks(0); // First arg is the output directory. FileOutputFormat.setOutputPath(c, new Path(args[0])); @@ -95,8 +92,7 @@ public JobConf createSubmittableJob(String[] args) throws IOException { } static int printUsage() { - System.out.println(NAME + - " [...]"); + System.out.println(NAME + " [...]"); return -1; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java index d9bb66bdf07f..3e38b0172ca0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +18,6 @@ package org.apache.hadoop.hbase.mapred; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -32,13 +27,15 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.JobConfigurable; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. 
*/ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase implements - JobConfigurable { +public class TableInputFormat extends TableInputFormatBase implements JobConfigurable { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); /** @@ -59,7 +56,7 @@ protected void initialize(JobConf job) throws IOException { Path[] tableNames = FileInputFormat.getInputPaths(job); String colArg = job.get(COLUMN_LIST); String[] colNames = colArg.split(" "); - byte [][] m_cols = new byte[colNames.length][]; + byte[][] m_cols = new byte[colNames.length][]; for (int i = 0; i < m_cols.length; i++) { m_cols[i] = Bytes.toBytes(colNames[i]); } @@ -70,15 +67,14 @@ protected void initialize(JobConf job) throws IOException { public void validateInput(JobConf job) throws IOException { // expecting exactly one path - Path [] tableNames = FileInputFormat.getInputPaths(job); + Path[] tableNames = FileInputFormat.getInputPaths(job); if (tableNames == null || tableNames.length > 1) { throw new IOException("expecting one table name"); } // connected to table? if (getTable() == null) { - throw new IOException("could not connect to table '" + - tableNames[0].getName() + "'"); + throw new IOException("could not connect to table '" + tableNames[0].getName() + "'"); } // expecting at least one column diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java index 509972e92aa5..cbd7882ad7ef 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,10 +19,6 @@ import java.io.Closeable; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; @@ -37,21 +32,22 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; import org.apache.hadoop.mapred.Reporter; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a - * byte[] of input columns and optionally a {@link Filter}. - * Subclasses may use other TableRecordReader implementations. - * + * A Base for {@link TableInputFormat}s. Receives a {@link Table}, a byte[] of input columns and + * optionally a {@link Filter}. Subclasses may use other TableRecordReader implementations. * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to * function properly. Each of the entry points to this class used by the MapReduce framework, * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)}, - * will call {@link #initialize(JobConf)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. 
- * + * will call {@link #initialize(JobConf)} as a convenient centralized location to handle retrieving + * the necessary configuration information. If your subclass overrides either of these methods, + * either call the parent version or call initialize yourself. *

  * An example of a subclass:
+ *

      *   class ExampleTIF extends TableInputFormatBase {
      *
    @@ -77,33 +73,28 @@
      */
     
     @InterfaceAudience.Public
    -public abstract class TableInputFormatBase
-implements InputFormat<ImmutableBytesWritable, Result> {
+public abstract class TableInputFormatBase implements InputFormat<ImmutableBytesWritable, Result> {
       private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class);
    -  private byte [][] inputColumns;
    +  private byte[][] inputColumns;
       private Table table;
       private RegionLocator regionLocator;
       private Connection connection;
       private TableRecordReader tableRecordReader;
       private Filter rowFilter;
     
    -  private static final String NOT_INITIALIZED = "The input format instance has not been properly " +
    -      "initialized. Ensure you call initializeTable either in your constructor or initialize " +
    -      "method";
    -  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" +
    -            " previous error. Please look at the previous logs lines from" +
    -            " the task's full log for more details.";
    +  private static final String NOT_INITIALIZED = "The input format instance has not been properly "
    +      + "initialized. Ensure you call initializeTable either in your constructor or initialize "
    +      + "method";
    +  private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a"
    +      + " previous error. Please look at the previous logs lines from"
    +      + " the task's full log for more details.";
     
       /**
    -   * Builds a TableRecordReader. If no TableRecordReader was provided, uses
    -   * the default.
    -   *
    -   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit,
    -   *      JobConf, Reporter)
    +   * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default.
    +   * @see org.apache.hadoop.mapred.InputFormat#getRecordReader(InputSplit, JobConf, Reporter)
        */
-  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(
    -      InputSplit split, JobConf job, Reporter reporter)
    -  throws IOException {
+  public RecordReader<ImmutableBytesWritable, Result> getRecordReader(InputSplit split, JobConf job,
    +      Reporter reporter) throws IOException {
         // In case a subclass uses the deprecated approach or calls initializeTable directly
         if (table == null) {
           initialize(job);
    @@ -120,8 +111,8 @@ public RecordReader getRecordReader(
     
         TableSplit tSplit = (TableSplit) split;
         // if no table record reader was provided use default
    -    final TableRecordReader trr = this.tableRecordReader == null ? new TableRecordReader() :
    -        this.tableRecordReader;
    +    final TableRecordReader trr =
    +        this.tableRecordReader == null ? new TableRecordReader() : this.tableRecordReader;
         trr.setStartRow(tSplit.getStartRow());
         trr.setEndRow(tSplit.getEndRow());
         trr.setHTable(this.table);
    @@ -164,22 +155,16 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
       }
     
       /**
    -   * Calculates the splits that will serve as input for the map tasks.
    -   *
    -   * Splits are created in number equal to the smallest between numSplits and
    -   * the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
    -   * If the number of splits is smaller than the number of
    -   * {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits are spanned across
    -   * multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s
    -   * and are grouped the most evenly possible. In the
    -   * case splits are uneven the bigger splits are placed first in the
    -   * {@link InputSplit} array.
    -   *
    +   * Calculates the splits that will serve as input for the map tasks. Splits are created in number
    +   * equal to the smallest between numSplits and the number of
    +   * {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table. If the number of splits is
    +   * smaller than the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s then splits
    +   * are spanned across multiple {@link org.apache.hadoop.hbase.regionserver.HRegion}s and are
    +   * grouped the most evenly possible. In the case splits are uneven the bigger splits are placed
    +   * first in the {@link InputSplit} array.
        * @param job the map task {@link JobConf}
        * @param numSplits a hint to calculate the number of splits (mapred.map.tasks).
    -   *
        * @return the input splits
    -   *
        * @see org.apache.hadoop.mapred.InputFormat#getSplits(org.apache.hadoop.mapred.JobConf, int)
        */
       public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    @@ -196,26 +181,24 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
           throw new IOException(INITIALIZATION_ERROR, exception);
         }
     
    -    byte [][] startKeys = this.regionLocator.getStartKeys();
    +    byte[][] startKeys = this.regionLocator.getStartKeys();
         if (startKeys == null || startKeys.length == 0) {
           throw new IOException("Expecting at least one region");
         }
         if (this.inputColumns == null || this.inputColumns.length == 0) {
           throw new IOException("Expecting at least one column");
         }
    -    int realNumSplits = numSplits > startKeys.length? startKeys.length:
    -      numSplits;
    +    int realNumSplits = numSplits > startKeys.length ? startKeys.length : numSplits;
         InputSplit[] splits = new InputSplit[realNumSplits];
         int middle = startKeys.length / realNumSplits;
         int startPos = 0;
         for (int i = 0; i < realNumSplits; i++) {
           int lastPos = startPos + middle;
           lastPos = startKeys.length % realNumSplits > i ? lastPos + 1 : lastPos;
    -      String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).
    -        getHostname();
    -      splits[i] = new TableSplit(this.table.getName(),
    -        startKeys[startPos], ((i + 1) < realNumSplits) ? startKeys[lastPos]:
    -          HConstants.EMPTY_START_ROW, regionLocation);
    +      String regionLocation = regionLocator.getRegionLocation(startKeys[startPos]).getHostname();
    +      splits[i] = new TableSplit(this.table.getName(), startKeys[startPos],
    +          ((i + 1) < realNumSplits) ? startKeys[lastPos] : HConstants.EMPTY_START_ROW,
    +          regionLocation);
           LOG.info("split: " + i + "->" + splits[i]);
           startPos = lastPos;
         }
    @@ -224,15 +207,14 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     
       /**
        * Allows subclasses to initialize the table information.
    -   *
    -   * @param connection  The Connection to the HBase cluster. MUST be unmanaged. We will close.
    -   * @param tableName  The {@link TableName} of the table to process.
    +   * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close.
    +   * @param tableName The {@link TableName} of the table to process.
        * @throws IOException
        */
       protected void initializeTable(Connection connection, TableName tableName) throws IOException {
         if (this.table != null || this.connection != null) {
    -      LOG.warn("initializeTable called multiple times. Overwriting connection and table " +
    -          "reference; TableInputFormatBase will not close these old references when done.");
    +      LOG.warn("initializeTable called multiple times. Overwriting connection and table "
    +          + "reference; TableInputFormatBase will not close these old references when done.");
         }
         this.table = connection.getTable(tableName);
         this.regionLocator = connection.getRegionLocator(tableName);
    @@ -242,7 +224,7 @@ protected void initializeTable(Connection connection, TableName tableName) throw
       /**
        * @param inputColumns to be passed in {@link Result} to the map task.
        */
    -  protected void setInputColumns(byte [][] inputColumns) {
    +  protected void setInputColumns(byte[][] inputColumns) {
         this.inputColumns = inputColumns;
       }
     
    @@ -258,9 +240,7 @@ protected Table getTable() {
     
       /**
        * Allows subclasses to set the {@link TableRecordReader}.
    -   *
    -   * @param tableRecordReader
    -   *                to provide other {@link TableRecordReader} implementations.
    +   * @param tableRecordReader to provide other {@link TableRecordReader} implementations.
        */
       protected void setTableRecordReader(TableRecordReader tableRecordReader) {
         this.tableRecordReader = tableRecordReader;
    @@ -268,7 +248,6 @@ protected void setTableRecordReader(TableRecordReader tableRecordReader) {
     
       /**
        * Allows subclasses to set the {@link Filter} to be used.
    -   *
        * @param rowFilter
        */
       protected void setRowFilter(Filter rowFilter) {
    @@ -276,19 +255,15 @@ protected void setRowFilter(Filter rowFilter) {
       }
     
       /**
    -   * Handle subclass specific set up.
    -   * Each of the entry points used by the MapReduce framework,
    +   * Handle subclass specific set up. Each of the entry points used by the MapReduce framework,
        * {@link #getRecordReader(InputSplit, JobConf, Reporter)} and {@link #getSplits(JobConf, int)},
        * will call {@link #initialize(JobConf)} as a convenient centralized location to handle
        * retrieving the necessary configuration information and calling
    -   * {@link #initializeTable(Connection, TableName)}.
    -   *
    -   * Subclasses should implement their initialize call such that it is safe to call multiple times.
    -   * The current TableInputFormatBase implementation relies on a non-null table reference to decide
    -   * if an initialize call is needed, but this behavior may change in the future. In particular,
    -   * it is critical that initializeTable not be called multiple times since this will leak
    -   * Connection instances.
    -   *
    +   * {@link #initializeTable(Connection, TableName)}. Subclasses should implement their initialize
    +   * call such that it is safe to call multiple times. The current TableInputFormatBase
    +   * implementation relies on a non-null table reference to decide if an initialize call is needed,
    +   * but this behavior may change in the future. In particular, it is critical that initializeTable
    +   * not be called multiple times since this will leak Connection instances.
        */
       protected void initialize(JobConf job) throws IOException {
       }
    @@ -296,7 +271,6 @@ protected void initialize(JobConf job) throws IOException {
       /**
        * Close the Table and related objects that were initialized via
        * {@link #initializeTable(Connection, TableName)}.
    -   *
        * @throws IOException
        */
       protected void closeTable() throws IOException {
    @@ -307,7 +281,9 @@ protected void closeTable() throws IOException {
     
       private void close(Closeable... closables) throws IOException {
         for (Closeable c : closables) {
    -      if(c != null) { c.close(); }
    +      if (c != null) {
    +        c.close();
    +      }
         }
       }
     }
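
Aside, not part of the patch: the javadoc reformatted above still requires subclasses to route initializeTable(Connection, TableName) through initialize(JobConf) and to keep that call idempotent so Connection instances are not leaked. A minimal sketch of such a subclass follows; the class name, table name, and column family are hypothetical.

// Illustrative sketch only, not part of the patch. Table name and column family are made up.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.mapred.TableInputFormatBase;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;

public class ExampleTIF extends TableInputFormatBase {

  @Override
  protected void initialize(JobConf job) throws IOException {
    // A non-null table means initializeTable already ran; keep repeat calls cheap and
    // never open a second Connection.
    if (getTable() != null) {
      return;
    }
    Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create(job));
    initializeTable(connection, TableName.valueOf("exampleTable")); // hypothetical table
    setInputColumns(new byte[][] { Bytes.toBytes("exampleFamily:") }); // hypothetical column
  }
}
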
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
    index d76572722b6f..5b3d088cb5a3 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMap.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -18,21 +17,20 @@
      */
     package org.apache.hadoop.hbase.mapred;
     
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Result;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
     import org.apache.hadoop.io.WritableComparable;
     import org.apache.hadoop.mapred.Mapper;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
    - * Scan an HBase table to sort by a specified sort column.
    - * If the column does not exist, the record is not passed to Reduce.
    - *
    + * Scan an HBase table to sort by a specified sort column. If the column does not exist, the record
    + * is not passed to Reduce.
 * @param <K> WritableComparable key class
 * @param <V> Writable value class
      */
     @InterfaceAudience.Public
public interface TableMap<K extends WritableComparable<? super K>, V>
-extends Mapper<ImmutableBytesWritable, Result, K, V> {
+    extends Mapper<ImmutableBytesWritable, Result, K, V> {
     
     }
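
Aside, not part of the patch: the TableMap javadoc above and the TableMapReduceUtil javadoc that follows describe the usual wiring of a mapred-style table job. A minimal sketch, assuming a hypothetical table name and column list:

// Illustrative sketch only, not part of the patch. "exampleTable" and "exampleFamily:" are made up.
import java.io.IOException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class PassThroughMap extends MapReduceBase
    implements TableMap<ImmutableBytesWritable, Result> {

  @Override
  public void map(ImmutableBytesWritable key, Result value,
      OutputCollector<ImmutableBytesWritable, Result> output, Reporter reporter)
      throws IOException {
    // Emit every scanned row unchanged; a real mapper would transform the Result here.
    output.collect(key, value);
  }

  public static void configureJob(JobConf job) {
    // Sets the input format, mapper, output key/value classes and column list on the JobConf.
    TableMapReduceUtil.initTableMapJob("exampleTable", "exampleFamily:", PassThroughMap.class,
      ImmutableBytesWritable.class, Result.class, job);
  }
}
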
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    index 754bf2959a62..8a2058334f55 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableMapReduceUtil.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -18,11 +17,13 @@
      */
     package org.apache.hadoop.hbase.mapred;
     
    +import java.io.IOException;
    +import java.util.Collection;
    +import java.util.Map;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hbase.HBaseConfiguration;
     import org.apache.hadoop.hbase.TableName;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Connection;
     import org.apache.hadoop.hbase.client.ConnectionFactory;
     import org.apache.hadoop.hbase.client.Put;
    @@ -41,10 +42,7 @@
     import org.apache.hadoop.mapred.OutputFormat;
     import org.apache.hadoop.mapred.TextInputFormat;
     import org.apache.hadoop.mapred.TextOutputFormat;
    -
    -import java.io.IOException;
    -import java.util.Collection;
    -import java.util.Map;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Utility for {@link TableMap} and {@link TableReduce}
    @@ -54,57 +52,47 @@
     public class TableMapReduceUtil {
     
       /**
    -   * Use this before submitting a TableMap job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The table name to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job  The current job configuration to adjust.
    +   * Use this before submitting a TableMap job. It will appropriately set up the JobConf.
    +   * @param table The table name to read from.
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job configuration to adjust.
        */
    -  public static void initTableMapJob(String table, String columns,
    -    Class mapper,
    -    Class outputKeyClass,
    -    Class outputValueClass, JobConf job) {
    -    initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job,
    -      true, TableInputFormat.class);
    +  public static void initTableMapJob(String table, String columns, Class mapper,
    +      Class outputKeyClass, Class outputValueClass, JobConf job) {
    +    initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job, true,
    +      TableInputFormat.class);
       }
     
    -  public static void initTableMapJob(String table, String columns,
    -    Class mapper,
    -    Class outputKeyClass,
    -    Class outputValueClass, JobConf job, boolean addDependencyJars) {
    +  public static void initTableMapJob(String table, String columns, Class mapper,
    +      Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars) {
         initTableMapJob(table, columns, mapper, outputKeyClass, outputValueClass, job,
           addDependencyJars, TableInputFormat.class);
       }
     
       /**
    -   * Use this before submitting a TableMap job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The table name to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job  The current job configuration to adjust.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * Use this before submitting a TableMap job. It will appropriately set up the JobConf.
    +   * @param table The table name to read from.
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job configuration to adjust.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        */
    -  public static void initTableMapJob(String table, String columns,
    -    Class mapper,
    -    Class outputKeyClass,
    -    Class outputValueClass, JobConf job, boolean addDependencyJars,
    -    Class inputFormat) {
    +  public static void initTableMapJob(String table, String columns, Class mapper,
    +      Class outputKeyClass, Class outputValueClass, JobConf job, boolean addDependencyJars,
    +      Class inputFormat) {
     
         job.setInputFormat(inputFormat);
         job.setMapOutputValueClass(outputValueClass);
         job.setMapOutputKeyClass(outputKeyClass);
         job.setMapperClass(mapper);
         job.setStrings("io.serializations", job.get("io.serializations"),
    -        MutationSerialization.class.getName(), ResultSerialization.class.getName());
    +      MutationSerialization.class.getName(), ResultSerialization.class.getName());
         FileInputFormat.addInputPaths(job, table);
         job.set(TableInputFormat.COLUMN_LIST, columns);
         if (addDependencyJars) {
    @@ -117,24 +105,22 @@ public static void initTableMapJob(String table, String columns,
         try {
           initCredentials(job);
         } catch (IOException ioe) {
    -      // just spit out the stack trace?  really?
    +      // just spit out the stack trace? really?
           ioe.printStackTrace();
         }
       }
     
       /**
        * Sets up the job for reading from one or more multiple table snapshots, with one or more scans
    -   * per snapshot.
    -   * It bypasses hbase servers and read directly from snapshot files.
    -   *
    -   * @param snapshotScans     map of snapshot name to scans on that snapshot.
    -   * @param mapper            The mapper class to use.
    -   * @param outputKeyClass    The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job               The current job to adjust.  Make sure the passed job is
    -   *                          carrying all necessary HBase configuration.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *                          job classes via the distributed cache (tmpjars).
    +   * per snapshot. It bypasses hbase servers and read directly from snapshot files.
    +   * @param snapshotScans map of snapshot name to scans on that snapshot.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase
    +   *          configuration.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        */
       public static void initMultiTableSnapshotMapperJob(Map> snapshotScans,
           Class mapper, Class outputKeyClass, Class outputValueClass,
    @@ -157,30 +143,26 @@ public static void initMultiTableSnapshotMapperJob(Map>
       }
     
       /**
    -   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
    -   * and read directly from snapshot files.
    -   *
    +   * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly
    +   * from snapshot files.
        * @param snapshotName The name of the snapshot (of a table) to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param job  The current job to adjust.  Make sure the passed job is
    -   * carrying all necessary HBase configuration.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase
    +   *          configuration.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restore directory can be deleted.
    +   *          have write permissions to this directory, and this should not be a subdirectory of
    +   *          rootdir. After the job is finished, restore directory can be deleted.
        * @throws IOException When setting up the details fails.
        * @see TableSnapshotInputFormat
        */
       public static void initTableSnapshotMapJob(String snapshotName, String columns,
    -      Class mapper,
    -      Class outputKeyClass,
    -      Class outputValueClass, JobConf job,
    -      boolean addDependencyJars, Path tmpRestoreDir)
    -  throws IOException {
    +      Class mapper, Class outputKeyClass, Class outputValueClass,
    +      JobConf job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException {
         TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir);
         initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, job,
           addDependencyJars, TableSnapshotInputFormat.class);
    @@ -188,97 +170,80 @@ public static void initTableSnapshotMapJob(String snapshotName, String columns,
       }
     
       /**
    -   * Sets up the job for reading from a table snapshot. It bypasses hbase servers
    -   * and read directly from snapshot files.
    -   *
    +   * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly
    +   * from snapshot files.
        * @param snapshotName The name of the snapshot (of a table) to read from.
    -   * @param columns  The columns to scan.
    -   * @param mapper  The mapper class to use.
    -   * @param outputKeyClass  The class of the output key.
    -   * @param outputValueClass  The class of the output value.
    -   * @param jobConf  The current job to adjust.  Make sure the passed job is
    -   * carrying all necessary HBase configuration.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * @param columns The columns to scan.
    +   * @param mapper The mapper class to use.
    +   * @param outputKeyClass The class of the output key.
    +   * @param outputValueClass The class of the output value.
    +   * @param jobConf The current job to adjust. Make sure the passed job is carrying all necessary
    +   *          HBase configuration.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restore directory can be deleted.
    +   *          have write permissions to this directory, and this should not be a subdirectory of
    +   *          rootdir. After the job is finished, restore directory can be deleted.
        * @param splitAlgo algorithm to split
        * @param numSplitsPerRegion how many input splits to generate per one region
        * @throws IOException When setting up the details fails.
        * @see TableSnapshotInputFormat
        */
       public static void initTableSnapshotMapJob(String snapshotName, String columns,
    -                                             Class mapper,
    -                                             Class outputKeyClass,
    -                                             Class outputValueClass, JobConf jobConf,
    -                                             boolean addDependencyJars, Path tmpRestoreDir,
    -                                             RegionSplitter.SplitAlgorithm splitAlgo,
    -                                             int numSplitsPerRegion)
    -          throws IOException {
    +      Class mapper, Class outputKeyClass, Class outputValueClass,
    +      JobConf jobConf, boolean addDependencyJars, Path tmpRestoreDir,
    +      RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
         TableSnapshotInputFormat.setInput(jobConf, snapshotName, tmpRestoreDir, splitAlgo,
    -            numSplitsPerRegion);
    +      numSplitsPerRegion);
         initTableMapJob(snapshotName, columns, mapper, outputKeyClass, outputValueClass, jobConf,
    -            addDependencyJars, TableSnapshotInputFormat.class);
    +      addDependencyJars, TableSnapshotInputFormat.class);
         org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.resetCacheConfig(jobConf);
       }
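
Aside, not part of the patch: a sketch of how the snapshot variant documented above might be invoked. The snapshot name and restore path are hypothetical; per the javadoc, the restore directory must be writable by the current user, must not sit under hbase.rootdir, and can be removed once the job completes.

// Illustrative sketch only, not part of the patch. Snapshot name and restore path are made up.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.IdentityTableMap;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class SnapshotJobSetup {
  public static JobConf createJob() throws IOException {
    JobConf job = new JobConf(HBaseConfiguration.create());
    Path restoreDir = new Path("/tmp/hbase-snapshot-restore"); // hypothetical path
    // Reads the snapshot files directly from the filesystem, bypassing the region servers.
    TableMapReduceUtil.initTableSnapshotMapJob("exampleSnapshot", "exampleFamily:",
      IdentityTableMap.class, ImmutableBytesWritable.class, Result.class, job, true, restoreDir);
    job.setNumReduceTasks(0);
    return job;
  }
}
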
     
    -
       /**
    -   * Use this before submitting a TableReduce job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The output table.
    -   * @param reducer  The reducer class to use.
    -   * @param job  The current job configuration to adjust.
    +   * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
    +   * @param table The output table.
    +   * @param reducer The reducer class to use.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When determining the region count fails.
        */
    -  public static void initTableReduceJob(String table,
    -    Class reducer, JobConf job)
    -  throws IOException {
    +  public static void initTableReduceJob(String table, Class reducer,
    +      JobConf job) throws IOException {
         initTableReduceJob(table, reducer, job, null);
       }
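
As a quick usage note for the reformatted reduce-side helpers, a hedged sketch follows; the output table name is a placeholder and IdentityTableReduce simply stands in for a real reducer class.

// Hypothetical reduce-side wiring ("my_output_table" is a placeholder).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.IdentityTableReduce;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class ReduceSetupSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create());
    // Configures TableOutputFormat, the reducer class, the output table and the key/value classes.
    TableMapReduceUtil.initTableReduceJob("my_output_table", IdentityTableReduce.class, job);
  }
}
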
     
       /**
    -   * Use this before submitting a TableReduce job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The output table.
    -   * @param reducer  The reducer class to use.
    -   * @param job  The current job configuration to adjust.
    -   * @param partitioner  Partitioner to use. Pass null to use
    -   * default partitioner.
    +   * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
    +   * @param table The output table.
    +   * @param reducer The reducer class to use.
    +   * @param job The current job configuration to adjust.
    +   * @param partitioner Partitioner to use. Pass null to use default partitioner.
        * @throws IOException When determining the region count fails.
        */
    -  public static void initTableReduceJob(String table,
    -    Class reducer, JobConf job, Class partitioner)
    -  throws IOException {
    +  public static void initTableReduceJob(String table, Class reducer,
    +      JobConf job, Class partitioner) throws IOException {
         initTableReduceJob(table, reducer, job, partitioner, true);
       }
     
       /**
    -   * Use this before submitting a TableReduce job. It will
    -   * appropriately set up the JobConf.
    -   *
    -   * @param table  The output table.
    -   * @param reducer  The reducer class to use.
    -   * @param job  The current job configuration to adjust.
    -   * @param partitioner  Partitioner to use. Pass null to use
    -   * default partitioner.
    -   * @param addDependencyJars upload HBase jars and jars for any of the configured
    -   *           job classes via the distributed cache (tmpjars).
    +   * Use this before submitting a TableReduce job. It will appropriately set up the JobConf.
    +   * @param table The output table.
    +   * @param reducer The reducer class to use.
    +   * @param job The current job configuration to adjust.
    +   * @param partitioner Partitioner to use. Pass null to use default partitioner.
    +   * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via
    +   *          the distributed cache (tmpjars).
        * @throws IOException When determining the region count fails.
        */
    -  public static void initTableReduceJob(String table,
    -    Class reducer, JobConf job, Class partitioner,
    -    boolean addDependencyJars) throws IOException {
    +  public static void initTableReduceJob(String table, Class reducer,
    +      JobConf job, Class partitioner, boolean addDependencyJars) throws IOException {
         job.setOutputFormat(TableOutputFormat.class);
         job.setReducerClass(reducer);
         job.set(TableOutputFormat.OUTPUT_TABLE, table);
         job.setOutputKeyClass(ImmutableBytesWritable.class);
         job.setOutputValueClass(Put.class);
         job.setStrings("io.serializations", job.get("io.serializations"),
    -        MutationSerialization.class.getName(), ResultSerialization.class.getName());
    +      MutationSerialization.class.getName(), ResultSerialization.class.getName());
         if (partitioner == HRegionPartitioner.class) {
           job.setPartitionerClass(HRegionPartitioner.class);
           int regions = getRegionCount(HBaseConfiguration.create(job), TableName.valueOf(table));
    @@ -319,11 +284,10 @@ public static void initCredentials(JobConf job) throws IOException {
       }
     
       /**
    -   * Ensures that the given number of reduce tasks for the given job
    -   * configuration does not exceed the number of regions for the given table.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Ensures that the given number of reduce tasks for the given job configuration does not exceed
    +   * the number of regions for the given table.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       // Used by tests.
    @@ -335,11 +299,10 @@ public static void limitNumReduceTasks(String table, JobConf job) throws IOExcep
       }
     
       /**
    -   * Ensures that the given number of map tasks for the given job
    -   * configuration does not exceed the number of regions for the given table.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Ensures that the given number of map tasks for the given job configuration does not exceed the
    +   * number of regions for the given table.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       // Used by tests.
    @@ -351,11 +314,10 @@ public static void limitNumMapTasks(String table, JobConf job) throws IOExceptio
       }
     
       /**
    -   * Sets the number of reduce tasks for the given job configuration to the
    -   * number of regions the given table has.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Sets the number of reduce tasks for the given job configuration to the number of regions the
    +   * given table has.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       public static void setNumReduceTasks(String table, JobConf job) throws IOException {
    @@ -363,11 +325,10 @@ public static void setNumReduceTasks(String table, JobConf job) throws IOExcepti
       }
     
       /**
    -   * Sets the number of map tasks for the given job configuration to the
    -   * number of regions the given table has.
    -   *
    -   * @param table  The table to get the region count for.
    -   * @param job  The current job configuration to adjust.
    +   * Sets the number of map tasks for the given job configuration to the number of regions the given
    +   * table has.
    +   * @param table The table to get the region count for.
    +   * @param job The current job configuration to adjust.
        * @throws IOException When retrieving the table details fails.
        */
       public static void setNumMapTasks(String table, JobConf job) throws IOException {
    @@ -375,13 +336,11 @@ public static void setNumMapTasks(String table, JobConf job) throws IOException
       }
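
A short, hedged sketch of how these region-count helpers are typically applied; the table name and reducer count are hypothetical.

// Hypothetical use of the helpers above ("my_table" is a placeholder).
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.TableMapReduceUtil;
import org.apache.hadoop.mapred.JobConf;

public class TaskCountSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create());
    job.setNumReduceTasks(20);
    // Cap the requested reducers at the table's region count ...
    TableMapReduceUtil.limitNumReduceTasks("my_table", job);
    // ... or simply match the region count exactly.
    TableMapReduceUtil.setNumReduceTasks("my_table", job);
  }
}
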
     
       /**
    -   * Sets the number of rows to return and cache with each scanner iteration.
    -   * Higher caching values will enable faster mapreduce jobs at the expense of
    -   * requiring more heap to contain the cached rows.
    -   *
    +   * Sets the number of rows to return and cache with each scanner iteration. Higher caching values
    +   * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached
    +   * rows.
        * @param job The current job configuration to adjust.
    -   * @param batchSize The number of rows to return in batch with each scanner
    -   * iteration.
    +   * @param batchSize The number of rows to return in batch with each scanner iteration.
        */
       public static void setScannerCaching(JobConf job, int batchSize) {
         job.setInt("hbase.client.scanner.caching", batchSize);
    @@ -392,22 +351,17 @@ public static void setScannerCaching(JobConf job, int batchSize) {
        */
       public static void addDependencyJars(JobConf job) throws IOException {
         org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addHBaseDependencyJars(job);
    -    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(
    -      job,
    -      job.getMapOutputKeyClass(),
    -      job.getMapOutputValueClass(),
    -      job.getOutputKeyClass(),
    -      job.getOutputValueClass(),
    -      job.getPartitionerClass(),
    +    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(job,
    +      job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getOutputKeyClass(),
    +      job.getOutputValueClass(), job.getPartitionerClass(),
           job.getClass("mapred.input.format.class", TextInputFormat.class, InputFormat.class),
           job.getClass("mapred.output.format.class", TextOutputFormat.class, OutputFormat.class),
           job.getCombinerClass());
       }
     
    -
       private static int getRegionCount(Configuration conf, TableName tableName) throws IOException {
         try (Connection conn = ConnectionFactory.createConnection(conf);
    -      RegionLocator locator = conn.getRegionLocator(tableName)) {
    +        RegionLocator locator = conn.getRegionLocator(tableName)) {
           return locator.getAllRegionLocations().size();
         }
       }
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    index fcf6f552b7ac..700ea1bfcc18 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableOutputFormat.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,11 +18,9 @@
     package org.apache.hadoop.hbase.mapred;
     
     import java.io.IOException;
    -
     import org.apache.hadoop.fs.FileAlreadyExistsException;
     import org.apache.hadoop.fs.FileSystem;
     import org.apache.hadoop.hbase.TableName;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.BufferedMutator;
     import org.apache.hadoop.hbase.client.Connection;
     import org.apache.hadoop.hbase.client.ConnectionFactory;
    @@ -35,6 +32,7 @@
     import org.apache.hadoop.mapred.RecordWriter;
     import org.apache.hadoop.mapred.Reporter;
     import org.apache.hadoop.util.Progressable;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Convert Map/Reduce output and write it to an HBase table
    @@ -46,20 +44,18 @@ public class TableOutputFormat extends FileOutputFormat {
         private BufferedMutator m_mutator;
         private Connection conn;
     
    -
         /**
          * Instantiate a TableRecordWriter with the HBase HClient for writing.
    -     *
          * @deprecated since 2.0.0 and will be removed in 3.0.0. Please use
    -     *   {@code #TableRecordWriter(JobConf)} instead. This version does not clean up connections and
    -     *   will leak connections (removed in 2.0).
    +     *             {@code #TableRecordWriter(JobConf)} instead. This version does not clean up
    +     *             connections and will leak connections (removed in 2.0).
          * @see HBASE-16774
          */
         @Deprecated
    @@ -103,13 +99,10 @@ public void write(ImmutableBytesWritable key, Put value) throws IOException {
       }
     
       /**
    -   * Creates a new record writer.
    -   *
    -   * Be aware that the baseline javadoc gives the impression that there is a single
    -   * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
    +   * Creates a new record writer. Be aware that the baseline javadoc gives the impression that there
    +   * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new
        * RecordWriter per call of this method. You must close the returned RecordWriter when done.
        * Failure to do so will drop writes.
    -   *
        * @param ignored Ignored filesystem
        * @param job Current JobConf
        * @param name Name of the job
    @@ -119,15 +112,14 @@ public void write(ImmutableBytesWritable key, Put value) throws IOException {
        */
       @Override
       public RecordWriter getRecordWriter(FileSystem ignored, JobConf job, String name,
    -      Progressable progress)
    -  throws IOException {
    +      Progressable progress) throws IOException {
         // Clear write buffer on fail is true by default so no need to reset it.
         return new TableRecordWriter(job);
       }
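
Because the javadoc above stresses that an unclosed RecordWriter drops writes, here is a hedged sketch of driving the writer by hand; in a normal job the MapReduce framework manages this lifecycle, and the table name and cell values are placeholders.

// Hypothetical manual use of the writer; assumes a reachable cluster and an existing "my_table".
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RecordWriter;
import org.apache.hadoop.mapred.Reporter;

public class WriterSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create());
    job.set(TableOutputFormat.OUTPUT_TABLE, "my_table");   // placeholder table name
    RecordWriter<ImmutableBytesWritable, Put> writer =
      new TableOutputFormat().getRecordWriter(null, job, "sketch", Reporter.NULL); // fs is ignored
    try {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
      writer.write(new ImmutableBytesWritable(Bytes.toBytes("row1")), put);
    } finally {
      writer.close(Reporter.NULL);   // buffered mutations are flushed here; skipping this drops them
    }
  }
}
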
     
       @Override
       public void checkOutputSpecs(FileSystem ignored, JobConf job)
    -  throws FileAlreadyExistsException, InvalidJobConfException, IOException {
    +      throws FileAlreadyExistsException, InvalidJobConfException, IOException {
         String tableName = job.get(OUTPUT_TABLE);
         if (tableName == null) {
           throw new IOException("Must specify table name");
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
    index d56e18198635..114d443b2be6 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReader.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,27 +18,23 @@
     package org.apache.hadoop.hbase.mapred;
     
     import java.io.IOException;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Result;
     import org.apache.hadoop.hbase.client.Table;
     import org.apache.hadoop.hbase.filter.Filter;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
     import org.apache.hadoop.mapred.RecordReader;
    -
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Iterate over an HBase table data, return (Text, RowResult) pairs
      */
     @InterfaceAudience.Public
     -public class TableRecordReader
     -implements RecordReader<ImmutableBytesWritable, Result> {
     +public class TableRecordReader implements RecordReader<ImmutableBytesWritable, Result> {
     
       private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl();
     
       /**
        * Restart from survivable exceptions by creating a new scanner.
    -   *
        * @param firstRow
        * @throws IOException
        */
    @@ -49,7 +44,6 @@ public void restart(byte[] firstRow) throws IOException {
     
       /**
        * Build the scanner. Not done in constructor to allow for extension.
    -   *
        * @throws IOException
        */
       public void init() throws IOException {
    @@ -66,22 +60,21 @@ public void setHTable(Table htable) {
       /**
        * @param inputColumns the columns to be placed in {@link Result}.
        */
    -  public void setInputColumns(final byte [][] inputColumns) {
    +  public void setInputColumns(final byte[][] inputColumns) {
         this.recordReaderImpl.setInputColumns(inputColumns);
       }
     
       /**
        * @param startRow the first row in the split
        */
    -  public void setStartRow(final byte [] startRow) {
    +  public void setStartRow(final byte[] startRow) {
         this.recordReaderImpl.setStartRow(startRow);
       }
     
       /**
    -   *
        * @param endRow the last row in the split
        */
    -  public void setEndRow(final byte [] endRow) {
    +  public void setEndRow(final byte[] endRow) {
         this.recordReaderImpl.setEndRow(endRow);
       }
     
    @@ -98,7 +91,6 @@ public void close() {
     
       /**
        * @return ImmutableBytesWritable
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createKey()
        */
       public ImmutableBytesWritable createKey() {
    @@ -107,7 +99,6 @@ public ImmutableBytesWritable createKey() {
     
       /**
        * @return RowResult
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createValue()
        */
       public Result createValue() {
    @@ -132,8 +123,7 @@ public float getProgress() {
        * @return true if there was more data
        * @throws IOException
        */
    -  public boolean next(ImmutableBytesWritable key, Result value)
    -  throws IOException {
    +  public boolean next(ImmutableBytesWritable key, Result value) throws IOException {
         return this.recordReaderImpl.next(key, value);
       }
     }
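
For context on how this reader is driven, a hedged stand-alone sketch follows; the table name, column family and split boundaries are placeholders, and in a real job the input format performs this setup from the assigned split.

// Hypothetical scan loop over one key range; all names are placeholders.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapred.TableRecordReader;
import org.apache.hadoop.hbase.util.Bytes;

public class ReaderSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      TableRecordReader reader = new TableRecordReader();
      reader.setHTable(conn.getTable(TableName.valueOf("my_table"))); // placeholder table
      reader.setInputColumns(new byte[][] { Bytes.toBytes("cf") });   // placeholder family
      reader.setStartRow(Bytes.toBytes("row-aaa"));                   // split start (inclusive)
      reader.setEndRow(Bytes.toBytes("row-zzz"));                     // split end (exclusive)
      reader.init();
      ImmutableBytesWritable key = reader.createKey();
      Result value = reader.createValue();
      while (reader.next(key, value)) {
        // one (row key, Result) pair per iteration
      }
      reader.close();
    }
  }
}
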
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    index 046da3aa1b89..bd6c8f0c1c2e 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableRecordReaderImpl.java
    @@ -1,5 +1,4 @@
     /*
    - *
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -19,6 +18,7 @@
     package org.apache.hadoop.hbase.mapred;
     
     import static org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl.LOG_PER_ROW_COUNT;
    +
     import java.io.IOException;
     import org.apache.hadoop.conf.Configuration;
     import org.apache.hadoop.hbase.DoNotRetryIOException;
    @@ -43,13 +43,13 @@
     public class TableRecordReaderImpl {
       private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class);
     
    -  private byte [] startRow;
    -  private byte [] endRow;
    -  private byte [] lastSuccessfulRow;
    +  private byte[] startRow;
    +  private byte[] endRow;
    +  private byte[] lastSuccessfulRow;
       private Filter trrRowFilter;
       private ResultScanner scanner;
       private Table htable;
    -  private byte [][] trrInputColumns;
    +  private byte[][] trrInputColumns;
       private long timestamp;
       private int rowcount;
       private boolean logScannerActivity = false;
    @@ -69,17 +69,15 @@ public void restart(byte[] firstRow) throws IOException {
             this.scanner = this.htable.getScanner(scan);
             currentScan = scan;
           } else {
    -        LOG.debug("TIFB.restart, firstRow: " +
    -            Bytes.toStringBinary(firstRow) + ", endRow: " +
    -            Bytes.toStringBinary(endRow));
    +        LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", endRow: "
    +            + Bytes.toStringBinary(endRow));
             Scan scan = new Scan(firstRow, endRow);
             TableInputFormat.addColumns(scan, trrInputColumns);
             this.scanner = this.htable.getScanner(scan);
             currentScan = scan;
           }
         } else {
    -      LOG.debug("TIFB.restart, firstRow: " +
    -          Bytes.toStringBinary(firstRow) + ", no endRow");
    +      LOG.debug("TIFB.restart, firstRow: " + Bytes.toStringBinary(firstRow) + ", no endRow");
     
           Scan scan = new Scan(firstRow);
           TableInputFormat.addColumns(scan, trrInputColumns);
    @@ -104,13 +102,14 @@ public void init() throws IOException {
       byte[] getStartRow() {
         return this.startRow;
       }
    +
       /**
        * @param htable the {@link org.apache.hadoop.hbase.HTableDescriptor} to scan.
        */
       public void setHTable(Table htable) {
         Configuration conf = htable.getConfiguration();
         logScannerActivity = conf.getBoolean(
    -      "hbase.client.log.scanner.activity" /*ScannerCallable.LOG_SCANNER_ACTIVITY*/, false);
    +      "hbase.client.log.scanner.activity" /* ScannerCallable.LOG_SCANNER_ACTIVITY */, false);
         logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
         this.htable = htable;
       }
    @@ -118,22 +117,21 @@ public void setHTable(Table htable) {
       /**
        * @param inputColumns the columns to be placed in {@link Result}.
        */
    -  public void setInputColumns(final byte [][] inputColumns) {
    +  public void setInputColumns(final byte[][] inputColumns) {
         this.trrInputColumns = inputColumns;
       }
     
       /**
        * @param startRow the first row in the split
        */
    -  public void setStartRow(final byte [] startRow) {
    +  public void setStartRow(final byte[] startRow) {
         this.startRow = startRow;
       }
     
       /**
    -   *
        * @param endRow the last row in the split
        */
    -  public void setEndRow(final byte [] endRow) {
    +  public void setEndRow(final byte[] endRow) {
         this.endRow = endRow;
       }
     
    @@ -157,7 +155,6 @@ public void close() {
     
       /**
        * @return ImmutableBytesWritable
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createKey()
        */
       public ImmutableBytesWritable createKey() {
    @@ -166,7 +163,6 @@ public ImmutableBytesWritable createKey() {
     
       /**
        * @return RowResult
    -   *
        * @see org.apache.hadoop.mapred.RecordReader#createValue()
        */
       public Result createValue() {
    @@ -195,11 +191,10 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
           try {
             result = this.scanner.next();
             if (logScannerActivity) {
    -          rowcount ++;
    +          rowcount++;
               if (rowcount >= logPerRowCount) {
                 long now = EnvironmentEdgeManager.currentTime();
    -            LOG.info("Mapper took " + (now-timestamp)
    -              + "ms to process " + rowcount + " rows");
    +            LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
                 timestamp = now;
                 rowcount = 0;
               }
    @@ -213,16 +208,16 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
             // the scanner, if the second call fails, it will be rethrown
             LOG.debug("recovered from " + StringUtils.stringifyException(e));
             if (lastSuccessfulRow == null) {
    -          LOG.warn("We are restarting the first next() invocation," +
    -              " if your mapper has restarted a few other times like this" +
    -              " then you should consider killing this job and investigate" +
    -              " why it's taking so long.");
    +          LOG.warn("We are restarting the first next() invocation,"
    +              + " if your mapper has restarted a few other times like this"
    +              + " then you should consider killing this job and investigate"
    +              + " why it's taking so long.");
             }
             if (lastSuccessfulRow == null) {
               restart(startRow);
             } else {
               restart(lastSuccessfulRow);
    -          this.scanner.next();    // skip presumed already mapped row
    +          this.scanner.next(); // skip presumed already mapped row
             }
             result = this.scanner.next();
           }
    @@ -237,11 +232,10 @@ public boolean next(ImmutableBytesWritable key, Result value) throws IOException
         } catch (IOException ioe) {
           if (logScannerActivity) {
             long now = EnvironmentEdgeManager.currentTime();
    -        LOG.info("Mapper took " + (now-timestamp)
    -          + "ms to process " + rowcount + " rows");
    +        LOG.info("Mapper took " + (now - timestamp) + "ms to process " + rowcount + " rows");
             LOG.info(ioe.toString(), ioe);
    -        String lastRow = lastSuccessfulRow == null ?
    -          "null" : Bytes.toStringBinary(lastSuccessfulRow);
    +        String lastRow =
    +            lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow);
             LOG.info("lastSuccessfulRow=" + lastRow);
           }
           throw ioe;
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
    index a64e4cdc82f9..c1f55f3c16ee 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableReduce.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -18,21 +17,20 @@
      */
     package org.apache.hadoop.hbase.mapred;
     
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Put;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
     import org.apache.hadoop.io.WritableComparable;
     import org.apache.hadoop.mapred.Reducer;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * Write a table, sorting by the input key
    - *
       * @param <K> key class
       * @param <V> value class
      */
     @InterfaceAudience.Public
     @SuppressWarnings("unchecked")
      public interface TableReduce<K extends WritableComparable, V>
     -extends Reducer<K, V, ImmutableBytesWritable, Put> {
     +    extends Reducer<K, V, ImmutableBytesWritable, Put> {
     
     }
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
    index 1bacb89f5651..a59e464c552d 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSnapshotInputFormat.java
    @@ -1,4 +1,4 @@
    -/**
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -15,13 +15,15 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -
     package org.apache.hadoop.hbase.mapred;
     
    +import java.io.DataInput;
    +import java.io.DataOutput;
    +import java.io.IOException;
    +import java.util.List;
     import org.apache.hadoop.fs.Path;
     import org.apache.hadoop.hbase.HRegionInfo;
     import org.apache.hadoop.hbase.HTableDescriptor;
    -import org.apache.yetus.audience.InterfaceAudience;
     import org.apache.hadoop.hbase.client.Result;
     import org.apache.hadoop.hbase.client.Scan;
     import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    @@ -32,16 +34,11 @@
     import org.apache.hadoop.mapred.JobConf;
     import org.apache.hadoop.mapred.RecordReader;
     import org.apache.hadoop.mapred.Reporter;
    -
    -import java.io.DataInput;
    -import java.io.DataOutput;
    -import java.io.IOException;
    -import java.util.List;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. Further
      * documentation available on {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}.
    - *
      * @see org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat
      */
     @InterfaceAudience.Public
    @@ -86,8 +83,7 @@ public void readFields(DataInput in) throws IOException {
         }
       }
     
     -  static class TableSnapshotRecordReader
     -    implements RecordReader<ImmutableBytesWritable, Result> {
     +  static class TableSnapshotRecordReader implements RecordReader<ImmutableBytesWritable, Result> {
     
         private TableSnapshotInputFormatImpl.RecordReader delegate;
     
    @@ -137,7 +133,7 @@ public float getProgress() throws IOException {
       @Override
       public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
          List<TableSnapshotInputFormatImpl.InputSplit> splits =
    -      TableSnapshotInputFormatImpl.getSplits(job);
    +        TableSnapshotInputFormatImpl.getSplits(job);
         InputSplit[] results = new InputSplit[splits.size()];
         for (int i = 0; i < splits.size(); i++) {
           results[i] = new TableSnapshotRegionSplit(splits.get(i));
    @@ -146,8 +142,8 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
       }
     
       @Override
    -  public RecordReader
    -  getRecordReader(InputSplit split, JobConf job, Reporter reporter) throws IOException {
    +  public RecordReader getRecordReader(InputSplit split, JobConf job,
    +      Reporter reporter) throws IOException {
         return new TableSnapshotRecordReader((TableSnapshotRegionSplit) split, job);
       }
     
    @@ -155,9 +151,9 @@ public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
        * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
        * @param job the job to configure
        * @param snapshotName the name of the snapshot to read from
    -   * @param restoreDir a temporary directory to restore the snapshot into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restoreDir can be deleted.
    +   * @param restoreDir a temporary directory to restore the snapshot into. Current user should have
    +   *          write permissions to this directory, and this should not be a subdirectory of rootdir.
    +   *          After the job is finished, restoreDir can be deleted.
        * @throws IOException if an error occurs
        */
       public static void setInput(JobConf job, String snapshotName, Path restoreDir)
    @@ -169,15 +165,16 @@ public static void setInput(JobConf job, String snapshotName, Path restoreDir)
        * Configures the job to use TableSnapshotInputFormat to read from a snapshot.
        * @param job the job to configure
        * @param snapshotName the name of the snapshot to read from
    -   * @param restoreDir a temporary directory to restore the snapshot into. Current user should
    -   * have write permissions to this directory, and this should not be a subdirectory of rootdir.
    -   * After the job is finished, restoreDir can be deleted.
    +   * @param restoreDir a temporary directory to restore the snapshot into. Current user should have
    +   *          write permissions to this directory, and this should not be a subdirectory of rootdir.
    +   *          After the job is finished, restoreDir can be deleted.
        * @param splitAlgo split algorithm to generate splits from region
        * @param numSplitsPerRegion how many input splits to generate per one region
        * @throws IOException if an error occurs
        */
       public static void setInput(JobConf job, String snapshotName, Path restoreDir,
    -                              RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
    -    TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo, numSplitsPerRegion);
    +      RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException {
    +    TableSnapshotInputFormatImpl.setInput(job, snapshotName, restoreDir, splitAlgo,
    +      numSplitsPerRegion);
       }
     }
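
A hedged sketch of configuring the input format directly, without going through TableMapReduceUtil; the snapshot name, restore directory and split count are placeholders, and UniformSplit is just one possible SplitAlgorithm.

// Hypothetical direct configuration of the snapshot input format.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapred.TableSnapshotInputFormat;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.apache.hadoop.mapred.JobConf;

public class SnapshotInputSketch {
  public static void main(String[] args) throws Exception {
    JobConf job = new JobConf(HBaseConfiguration.create());
    job.setInputFormat(TableSnapshotInputFormat.class);
    // The restore dir must be writable by the submitting user and must not live under hbase.rootdir.
    TableSnapshotInputFormat.setInput(job, "my_snapshot", new Path("/tmp/restore"),
      new RegionSplitter.UniformSplit(), 4);
  }
}
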
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
    index d6e663730a7b..0e3ca25de812 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/TableSplit.java
    @@ -1,5 +1,4 @@
    -/**
    - *
    +/*
      * Licensed to the Apache Software Foundation (ASF) under one
      * or more contributor license agreements.  See the NOTICE file
      * distributed with this work for additional information
    @@ -22,12 +21,11 @@
     import java.io.DataOutput;
     import java.io.IOException;
     import java.util.Arrays;
    -
    -import org.apache.yetus.audience.InterfaceAudience;
    -import org.apache.hadoop.hbase.TableName;
     import org.apache.hadoop.hbase.HConstants;
    +import org.apache.hadoop.hbase.TableName;
     import org.apache.hadoop.hbase.util.Bytes;
     import org.apache.hadoop.mapred.InputSplit;
    +import org.apache.yetus.audience.InterfaceAudience;
     
     /**
      * A table split corresponds to a key range [low, high)
    @@ -35,14 +33,13 @@
     @InterfaceAudience.Public
      public class TableSplit implements InputSplit, Comparable<TableSplit> {
       private TableName m_tableName;
    -  private byte [] m_startRow;
    -  private byte [] m_endRow;
    +  private byte[] m_startRow;
    +  private byte[] m_endRow;
       private String m_regionLocation;
     
       /** default constructor */
       public TableSplit() {
    -    this((TableName)null, HConstants.EMPTY_BYTE_ARRAY,
    -      HConstants.EMPTY_BYTE_ARRAY, "");
    +    this((TableName) null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, "");
       }
     
       /**
    @@ -52,18 +49,15 @@ public TableSplit() {
        * @param endRow
        * @param location
        */
    -  public TableSplit(TableName tableName, byte [] startRow, byte [] endRow,
    -      final String location) {
    +  public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) {
         this.m_tableName = tableName;
         this.m_startRow = startRow;
         this.m_endRow = endRow;
         this.m_regionLocation = location;
       }
     
    -  public TableSplit(byte [] tableName, byte [] startRow, byte [] endRow,
    -      final String location) {
    -    this(TableName.valueOf(tableName), startRow, endRow,
    -      location);
    +  public TableSplit(byte[] tableName, byte[] startRow, byte[] endRow, final String location) {
    +    this(TableName.valueOf(tableName), startRow, endRow, location);
       }
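
To make the [low, high) convention concrete, a small hedged example of constructing a split; the table name, row keys and region location are placeholders.

// Hypothetical split covering rows in ["row-a", "row-m") served from one region location.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mapred.TableSplit;
import org.apache.hadoop.hbase.util.Bytes;

public class SplitSketch {
  public static void main(String[] args) {
    TableSplit split = new TableSplit(TableName.valueOf("my_table"),
      Bytes.toBytes("row-a"),   // start row, inclusive
      Bytes.toBytes("row-m"),   // end row, exclusive
      "rs1.example.com");       // hostname used for locality hints
    System.out.println(split);
  }
}
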
     
       /** @return table name */
    @@ -72,17 +66,17 @@ public TableName getTable() {
       }
     
       /** @return table name */
    -   public byte [] getTableName() {
    -     return this.m_tableName.getName();
    -   }
    +  public byte[] getTableName() {
    +    return this.m_tableName.getName();
    +  }
     
       /** @return starting row key */
    -  public byte [] getStartRow() {
    +  public byte[] getStartRow() {
         return this.m_startRow;
       }
     
       /** @return end row key */
    -  public byte [] getEndRow() {
    +  public byte[] getEndRow() {
         return this.m_endRow;
       }
     
    @@ -92,7 +86,7 @@ public String getRegionLocation() {
       }
     
       public String[] getLocations() {
    -    return new String[] {this.m_regionLocation};
    +    return new String[] { this.m_regionLocation };
       }
     
       public long getLength() {
    @@ -116,14 +110,14 @@ public void write(DataOutput out) throws IOException {
     
       @Override
       public String toString() {
    -      StringBuilder sb = new StringBuilder();
    -      sb.append("HBase table split(");
    -      sb.append("table name: ").append(m_tableName);
    -      sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow));
    -      sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow));
    -      sb.append(", region location: ").append(m_regionLocation);
    -      sb.append(")");
    -      return sb.toString();
    +    StringBuilder sb = new StringBuilder();
    +    sb.append("HBase table split(");
    +    sb.append("table name: ").append(m_tableName);
    +    sb.append(", start row: ").append(Bytes.toStringBinary(m_startRow));
    +    sb.append(", end row: ").append(Bytes.toStringBinary(m_endRow));
    +    sb.append(", region location: ").append(m_regionLocation);
    +    sb.append(")");
    +    return sb.toString();
       }
     
       @Override
    @@ -136,11 +130,10 @@ public boolean equals(Object o) {
         if (o == null || !(o instanceof TableSplit)) {
           return false;
         }
    -    TableSplit other = (TableSplit)o;
    -    return m_tableName.equals(other.m_tableName) &&
    -      Bytes.equals(m_startRow, other.m_startRow) &&
    -      Bytes.equals(m_endRow, other.m_endRow) &&
    -      m_regionLocation.equals(other.m_regionLocation);
    +    TableSplit other = (TableSplit) o;
    +    return m_tableName.equals(other.m_tableName) && Bytes.equals(m_startRow, other.m_startRow)
    +        && Bytes.equals(m_endRow, other.m_endRow)
    +        && m_regionLocation.equals(other.m_regionLocation);
       }
     
       @Override
    diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
    index 1da3a527381d..922c761c8386 100644
    --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
    +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapred/package-info.java
    @@ -1,26 +1,18 @@
     /*
    - *
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements.  See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership.  The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License.  You may obtain a copy of the License at
    - *
    - *     http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
    + * agreements. See the NOTICE file distributed with this work for additional information regarding
    + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance with the License. You may obtain a
    + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable
    + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS"
    + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License
    + * for the specific language governing permissions and limitations under the License.
      */
     /**
    -Provides HBase MapReduce
    -Input/OutputFormats, a table indexing MapReduce job, and utility methods.
    -
    -

     -See HBase and MapReduce
     -in the HBase Reference Guide for mapreduce over hbase documentation.
     -*/
     + * Provides HBase MapReduce
     + * Input/OutputFormats, a table indexing MapReduce job, and utility methods.
     + *
     + * See HBase and MapReduce in the HBase
     + * Reference Guide for mapreduce over hbase documentation.
     + */
      package org.apache.hadoop.hbase.mapred;
     diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
     index ac7393a27f2b..5f3166f14b1a 100644
     --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
     +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCounter.java
     @@ -1,5 +1,4 @@
     -/**
     - *
     +/*
       * Licensed to the Apache Software Foundation (ASF) under one
       * or more contributor license agreements.  See the NOTICE file
       * distributed with this work for additional information
     @@ -19,18 +18,14 @@
      package org.apache.hadoop.hbase.mapreduce;
      
      import java.io.IOException;
     -
     -import org.apache.hadoop.hbase.CompareOperator;
     -import org.apache.hadoop.hbase.HConstants;
     -import org.apache.yetus.audience.InterfaceAudience;
     -import org.slf4j.Logger;
     -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.conf.Configured;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hbase.Cell;
      import org.apache.hadoop.hbase.CellUtil;
     +import org.apache.hadoop.hbase.CompareOperator;
      import org.apache.hadoop.hbase.HBaseConfiguration;
     +import org.apache.hadoop.hbase.HConstants;
      import org.apache.hadoop.hbase.client.Result;
      import org.apache.hadoop.hbase.client.Scan;
      import org.apache.hadoop.hbase.filter.Filter;
     @@ -47,12 +42,16 @@
      import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
      import org.apache.hadoop.util.Tool;
      import org.apache.hadoop.util.ToolRunner;
     +import org.apache.yetus.audience.InterfaceAudience;
     +import org.slf4j.Logger;
     +import org.slf4j.LoggerFactory;
      
      import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
      
      /**
     - * A job with a a map and reduce phase to count cells in a table.
     - * The counter lists the following stats for a given table:
     + * A job with a a map and reduce phase to count cells in a table. The counter lists the following
     + * stats for a given table:
     + *
       * <pre>
      * 1. Total number of rows in the table
      * 2. Total number of CFs across all rows
    @@ -65,17 +64,14 @@
      * 9. Total size of serialized cells across all rows.
      * 
    * - * The cellcounter can take optional parameters to use a user - * supplied row/family/qualifier string to use in the report and - * second a regex based or prefix based row filter to restrict the - * count operation to a limited subset of rows from the table or a - * start time and/or end time to limit the count to a time range. + * The cellcounter can take optional parameters to use a user supplied row/family/qualifier string + * to use in the report and second a regex based or prefix based row filter to restrict the count + * operation to a limited subset of rows from the table or a start time and/or end time to limit the + * count to a time range. */ @InterfaceAudience.Public public class CellCounter extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(CellCounter.class.getName()); - + private static final Logger LOG = LoggerFactory.getLogger(CellCounter.class.getName()); /** * Name of this 'program'. @@ -87,15 +83,12 @@ public class CellCounter extends Configured implements Tool { /** * Mapper that runs the count. */ - static class CellCounterMapper - extends TableMapper { + static class CellCounterMapper extends TableMapper { /** * Counter enumeration to count the actual rows. */ public static enum Counters { - ROWS, - CELLS, - SIZE + ROWS, CELLS, SIZE } private Configuration conf; @@ -117,26 +110,22 @@ public static enum Counters { @Override protected void setup(Context context) throws IOException, InterruptedException { conf = context.getConfiguration(); - separator = conf.get("ReportSeparator",":"); + separator = conf.get("ReportSeparator", ":"); } /** * Maps the data. - * - * @param row The current table row key. - * @param values The columns. + * @param row The current table row key. + * @param values The columns. * @param context The current context. * @throws IOException When something is broken with the data. 
*/ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", - justification="Findbugs is blind to the Precondition null check") - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { - Preconditions.checkState(values != null, - "values passed to the map is null"); + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", + justification = "Findbugs is blind to the Precondition null check") + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { + Preconditions.checkState(values != null, "values passed to the map is null"); try { byte[] currentRow = values.getRow(); @@ -167,14 +156,13 @@ public void map(ImmutableBytesWritable row, Result values, context.getCounter("CF", currentFamilyName + "_Size").increment(size); context.write(new Text(currentFamilyName + "_Size"), new LongWritable(size)); } - if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)){ + if (currentQualifier == null || !CellUtil.matchingQualifier(value, currentQualifier)) { currentQualifier = CellUtil.cloneQualifier(value); - currentQualifierName = currentFamilyName + separator + - Bytes.toStringBinary(currentQualifier); + currentQualifierName = + currentFamilyName + separator + Bytes.toStringBinary(currentQualifier); currentRowQualifierName = currentRowKey + separator + currentQualifierName; - context.write(new Text("Total Qualifiers across all Rows"), - new LongWritable(1)); + context.write(new Text("Total Qualifiers across all Rows"), new LongWritable(1)); context.write(new Text(currentQualifierName), new LongWritable(1)); context.getCounter("Q", currentQualifierName + "_Size").increment(size); context.write(new Text(currentQualifierName + "_Size"), new LongWritable(size)); @@ -208,23 +196,21 @@ public void reduce(Key key, Iterable values, Context context) /** * Sets up the actual job. - * * @param conf The current configuration. * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; Path outputDir = new Path(args[1]); - String reportSeparatorString = (args.length > 2) ? args[2]: ":"; + String reportSeparatorString = (args.length > 2) ? args[2] : ":"; conf.set("ReportSeparator", reportSeparatorString); Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName)); job.setJarByClass(CellCounter.class); Scan scan = getConfiguredScanForJob(conf, args); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - CellCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, CellCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setMapOutputKeyClass(Text.class); job.setMapOutputValueClass(LongWritable.class); job.setOutputFormatClass(TextOutputFormat.class); @@ -248,7 +234,7 @@ private static Scan getConfiguredScanForJob(Configuration conf, String[] args) s.setCacheBlocks(false); // Set RowFilter or Prefix Filter if applicable. 
Filter rowFilter = getRowFilter(args); - if (rowFilter!= null) { + if (rowFilter != null) { LOG.info("Setting Row Filter for counter."); s.setFilter(rowFilter); } @@ -261,10 +247,9 @@ private static Scan getConfiguredScanForJob(Configuration conf, String[] args) return s; } - private static Filter getRowFilter(String[] args) { Filter rowFilter = null; - String filterCriteria = (args.length > 3) ? args[3]: null; + String filterCriteria = (args.length > 3) ? args[3] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -291,11 +276,10 @@ private static long[] getTimeRange(String[] args) throws IOException { } } - if (startTime == 0 && endTime == 0) - return null; + if (startTime == 0 && endTime == 0) return null; endTime = endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime; - return new long [] {startTime, endTime}; + return new long[] { startTime, endTime }; } @Override @@ -318,8 +302,7 @@ private void printUsage(int parameterCount) { System.err.println(" -D" + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D" + TableInputFormat.SCAN_COLUMNS + "=\" ...\""); - System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY - + "=,, ..."); + System.err.println(" -D" + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D" + TableInputFormat.SCAN_TIMESTAMP + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_START + "="); System.err.println(" -D" + TableInputFormat.SCAN_TIMERANGE_END + "="); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java index 6c69651d0a43..38959964ef44 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellCreator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,13 +19,12 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.util.ReflectionUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Facade to create Cells for HFileOutputFormat. The created Cells are of Put type. 
@@ -39,9 +38,9 @@ public class CellCreator { private VisibilityExpressionResolver visExpResolver; public CellCreator(Configuration conf) { - Class clazz = conf.getClass( - VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, - VisibilityExpressionResolver.class); + Class clazz = + conf.getClass(VISIBILITY_EXP_RESOLVER_CLASS, DefaultVisibilityExpressionResolver.class, + VisibilityExpressionResolver.class); this.visExpResolver = ReflectionUtils.newInstance(clazz, conf); this.visExpResolver.init(); } @@ -67,7 +66,7 @@ public Cell create(byte[] row, int roffset, int rlength, byte[] family, int foff byte[] qualifier, int qoffset, int qlength, long timestamp, byte[] value, int voffset, int vlength) throws IOException { return create(row, roffset, rlength, family, foffset, flength, qualifier, qoffset, qlength, - timestamp, value, voffset, vlength, (List)null); + timestamp, value, voffset, vlength, (List) null); } /** diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java index 6dac6f7dd59a..9b8b5ed68e99 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,16 +22,15 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; /** * Use to specify the type of serialization for the mappers and reducers @@ -63,7 +62,7 @@ public void close() throws IOException { @Override public KeyValue deserialize(Cell ignore) throws IOException { - // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO + // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO return KeyValueUtil.create(this.dis); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java index de961cf35458..5cc1e8cce848 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CellSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +19,6 @@ import java.io.IOException; import java.util.TreeSet; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.PrivateCellUtil; @@ -30,10 +28,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Cells. 
- * Reads in all Cells from passed Iterator, sorts them, then emits - * Cells in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Cells. Reads in all Cells from passed Iterator, sorts them, then emits Cells in + * sorted order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 */ @InterfaceAudience.Public @@ -41,7 +37,7 @@ public class CellSortReducer extends Reducer { protected void reduce(ImmutableBytesWritable row, Iterable kvs, Reducer.Context context) - throws java.io.IOException, InterruptedException { + throws java.io.IOException, InterruptedException { TreeSet map = new TreeSet<>(CellComparator.getInstance()); for (Cell kv : kvs) { try { @@ -52,7 +48,7 @@ protected void reduce(ImmutableBytesWritable row, Iterable kvs, } context.setStatus("Read " + map.getClass()); int index = 0; - for (Cell kv: map) { + for (Cell kv : map) { context.write(row, new MapReduceExtendedCell(kv)); if (++index % 100 == 0) context.setStatus("Wrote " + index); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java index bde3519d37cc..f2abf12286bb 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/CopyTable.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,9 +44,9 @@ import org.slf4j.LoggerFactory; /** - * Tool used to copy a table to another one which can be on a different setup. - * It is also configurable with a start and time as well as a specification - * of the region server implementation if different from the local cluster. + * Tool used to copy a table to another one which can be on a different setup. It is also + * configurable with a start and time as well as a specification of the region server implementation + * if different from the local cluster. */ @InterfaceAudience.Public public class CopyTable extends Configured implements Tool { @@ -101,8 +100,7 @@ private void initCopyTableMapperReducerJob(Job job, Scan scan) throws IOExceptio /** * Sets up the actual job. - * - * @param args The command line parameters. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. 
*/ @@ -145,20 +143,20 @@ public Job createSubmittableJob(String[] args) throws IOException { scan.withStopRow(Bytes.toBytesBinary(stopRow)); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - Map cfRenameMap = new HashMap<>(); - for(String fam : fams) { + Map cfRenameMap = new HashMap<>(); + for (String fam : fams) { String sourceCf; - if(fam.contains(":")) { - // fam looks like "sourceCfName:destCfName" - String[] srcAndDest = fam.split(":", 2); - sourceCf = srcAndDest[0]; - String destCf = srcAndDest[1]; - cfRenameMap.put(sourceCf, destCf); + if (fam.contains(":")) { + // fam looks like "sourceCfName:destCfName" + String[] srcAndDest = fam.split(":", 2); + sourceCf = srcAndDest[0]; + String destCf = srcAndDest[1]; + cfRenameMap.put(sourceCf, destCf); } else { - // fam is just "sourceCf" - sourceCf = fam; + // fam is just "sourceCf" + sourceCf = fam; } scan.addFamily(Bytes.toBytes(sourceCf)); } @@ -190,14 +188,14 @@ public Job createSubmittableJob(String[] args) throws IOException { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + - "[--new.name=NEW] [--peer.adr=ADR] "); + System.err.println("Usage: CopyTable [general options] [--starttime=X] [--endtime=Y] " + + "[--new.name=NEW] [--peer.adr=ADR] "); System.err.println(); System.err.println("Options:"); System.err.println(" rs.class hbase.regionserver.class of the peer cluster"); @@ -217,18 +215,19 @@ private static void printUsage(final String errorMsg) { System.err.println(" To copy from cf1 to cf2, give sourceCfName:destCfName. 
"); System.err.println(" To keep the same name, just give \"cfName\""); System.err.println(" all.cells also copy delete markers and deleted cells"); - System.err.println(" bulkload Write input into HFiles and bulk load to the destination " - + "table"); + System.err.println( + " bulkload Write input into HFiles and bulk load to the destination " + "table"); System.err.println(" snapshot Copy the data from snapshot to destination table."); System.err.println(); System.err.println("Args:"); System.err.println(" tablename Name of the table to copy"); System.err.println(); System.err.println("Examples:"); - System.err.println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + - "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); + System.err + .println(" To copy 'TestTable' to a cluster that uses replication for a 1 hour window:"); + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.CopyTable --starttime=1265875194289 --endtime=1265878794289 " + + "--peer.adr=server1,server2,server3:2181:/hbase --families=myOldCf:myNewCf,cf2,cf3 TestTable "); System.err.println(" To copy data from 'sourceTableSnapshot' to 'destTable': "); System.err.println(" $ hbase org.apache.hadoop.hbase.mapreduce.CopyTable " + "--snapshot --new.name=destTable sourceTableSnapshot"); @@ -240,8 +239,7 @@ private static void printUsage(final String errorMsg) { + " decreases the round trip time to the server and may increase performance.\n" + " -Dhbase.client.scanner.caching=100\n" + " The following should always be set to false, to prevent writing data twice, which may produce \n" - + " inaccurate results.\n" - + " -Dmapreduce.map.speculative=false"); + + " inaccurate results.\n" + " -Dmapreduce.map.speculative=false"); } private boolean doCommandLine(final String[] args) { @@ -332,7 +330,7 @@ private boolean doCommandLine(final String[] args) { continue; } - if(cmd.startsWith("--snapshot")){ + if (cmd.startsWith("--snapshot")) { readingSnapshot = true; continue; } @@ -392,8 +390,7 @@ private boolean doCommandLine(final String[] args) { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java index 07f05dd79804..df1433086d85 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/DefaultVisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -25,13 +25,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.TableNotFoundException; import org.apache.hadoop.hbase.Tag; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Result; @@ -43,6 +39,9 @@ import org.apache.hadoop.hbase.security.visibility.VisibilityLabelOrdinalProvider; import org.apache.hadoop.hbase.security.visibility.VisibilityUtils; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * This implementation creates tags by expanding expression using label ordinal. Labels will be @@ -111,12 +110,11 @@ public void init() { LOG.warn("Error closing 'labels' table", ioe); } } - if (connection != null) - try { - connection.close(); - } catch (IOException ioe) { - LOG.warn("Failed close of temporary connection", ioe); - } + if (connection != null) try { + connection.close(); + } catch (IOException ioe) { + LOG.warn("Failed close of temporary connection", ioe); + } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java index ed31c8422e7e..fa6957e2ed1a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Driver.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -28,39 +27,36 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Driver for hbase mapreduce jobs. Select which to run by passing - * name of job to this main. + * Driver for hbase mapreduce jobs. Select which to run by passing name of job to this main. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) @InterfaceStability.Stable public class Driver { - private Driver() {} + private Driver() { + } public static void main(String[] args) throws Throwable { ProgramDriver pgd = new ProgramDriver(); - pgd.addClass(RowCounter.NAME, RowCounter.class, - "Count rows in HBase table."); - pgd.addClass(CellCounter.NAME, CellCounter.class, - "Count cells in HBase table."); + pgd.addClass(RowCounter.NAME, RowCounter.class, "Count rows in HBase table."); + pgd.addClass(CellCounter.NAME, CellCounter.class, "Count cells in HBase table."); pgd.addClass(Export.NAME, Export.class, "Write table data to HDFS."); pgd.addClass(Import.NAME, Import.class, "Import data written by Export."); pgd.addClass(ImportTsv.NAME, ImportTsv.class, "Import data in TSV format."); - pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, - "Complete a bulk data load."); + pgd.addClass(BulkLoadHFilesTool.NAME, BulkLoadHFilesTool.class, "Complete a bulk data load."); pgd.addClass(CopyTable.NAME, CopyTable.class, - "Export a table from local cluster to peer cluster."); - pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, "Compare" + - " data from tables in two different clusters. 
It" + - " doesn't work for incrementColumnValues'd cells since" + - " timestamp is changed after appending to WAL."); + "Export a table from local cluster to peer cluster."); + pgd.addClass(VerifyReplication.NAME, VerifyReplication.class, + "Compare" + " data from tables in two different clusters. It" + + " doesn't work for incrementColumnValues'd cells since" + + " timestamp is changed after appending to WAL."); pgd.addClass(WALPlayer.NAME, WALPlayer.class, "Replay WAL files."); - pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, "Export" + - " the specific snapshot to a given FileSystem."); - pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + - "table and cf and confirm that the files they point to are correct."); + pgd.addClass(ExportSnapshot.NAME, ExportSnapshot.class, + "Export" + " the specific snapshot to a given FileSystem."); + pgd.addClass(MobRefReporter.NAME, MobRefReporter.class, "Check the mob cells in a particular " + + "table and cf and confirm that the files they point to are correct."); - ProgramDriver.class.getMethod("driver", new Class [] {String[].class}). - invoke(pgd, new Object[]{args}); + ProgramDriver.class.getMethod("driver", new Class[] { String[].class }).invoke(pgd, + new Object[] { args }); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java index eb0f649e643b..3e02114a3bb0 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Export.java @@ -1,33 +1,31 @@ -/** -* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; -import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Triple; import org.apache.hadoop.mapreduce.Job; @@ -38,8 +36,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Export an HBase table. - * Writes content to sequence files up in HDFS. Use {@link Import} to read it + * Export an HBase table. Writes content to sequence files up in HDFS. Use {@link Import} to read it * back in again. */ @InterfaceAudience.Public @@ -49,14 +46,12 @@ public class Export extends Configured implements Tool { /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { Triple arguments = ExportUtils.getArgumentsFromCommandLine(conf, args); String tableName = arguments.getFirst().getNameAsString(); Path outputDir = arguments.getThird(); @@ -66,12 +61,13 @@ public static Job createSubmittableJob(Configuration conf, String[] args) // Set optional scan parameters Scan s = arguments.getSecond(); IdentityTableMapper.initJob(tableName, s, IdentityTableMapper.class, job); - // No reducers. Just write straight to output files. + // No reducers. Just write straight to output files. job.setNumReduceTasks(0); job.setOutputFormatClass(SequenceFileOutputFormat.class); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(Result.class); - FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't have a default fs. + FileOutputFormat.setOutputPath(job, outputDir); // job conf doesn't contain the conf so doesn't + // have a default fs. 
return job; } @@ -80,7 +76,7 @@ public int run(String[] args) throws Exception { if (!ExportUtils.isValidArguements(args)) { ExportUtils.usage("Wrong number of arguments: " + ArrayUtils.getLength(args)); System.err.println(" -D " + JOB_NAME_CONF_KEY - + "=jobName - use the specified mapreduce job name for the export"); + + "=jobName - use the specified mapreduce job name for the export"); System.err.println("For MR performance consider the following properties:"); System.err.println(" -D mapreduce.map.speculative=false"); System.err.println(" -D mapreduce.reduce.speculative=false"); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java index 75feab5e4bb6..d2cc858e5e8e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ExportUtils.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.CompareOperator; @@ -42,8 +40,8 @@ import org.slf4j.LoggerFactory; /** - * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} - * and org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). + * Some helper methods are used by {@link org.apache.hadoop.hbase.mapreduce.Export} and + * org.apache.hadoop.hbase.coprocessor.Export (in hbase-endpooint). */ @InterfaceAudience.Private public final class ExportUtils { @@ -52,37 +50,39 @@ public final class ExportUtils { public static final String EXPORT_BATCHING = "hbase.export.scanner.batch"; public static final String EXPORT_CACHING = "hbase.export.scanner.caching"; public static final String EXPORT_VISIBILITY_LABELS = "hbase.export.visibility.labels"; + /** * Common usage for other export tools. - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ public static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - System.err.println("Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); + System.err.println("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]\n"); System.err.println(" Note: -D properties will be applied to the conf used. 
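// Editor's note, not part of the patch: a hedged sketch of driving the Export tool described
// above from Java instead of the shell. The table name "demo" and the output path are
// illustrative only; positional args are <tablename> <outputdir> [<versions> [<start> [<end>]]].
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.Export;
import org.apache.hadoop.util.ToolRunner;

public class ExportSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Keep a single version over the full time range.
    int exitCode =
      ToolRunner.run(conf, new Export(), new String[] { "demo", "/tmp/demo-export", "1" });
    System.exit(exitCode);
  }
}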
"); System.err.println(" For example: "); System.err.println(" -D " + FileOutputFormat.COMPRESS + "=true"); - System.err.println(" -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); + System.err.println( + " -D " + FileOutputFormat.COMPRESS_CODEC + "=org.apache.hadoop.io.compress.GzipCodec"); System.err.println(" -D " + FileOutputFormat.COMPRESS_TYPE + "=BLOCK"); System.err.println(" Additionally, the following SCAN properties can be specified"); System.err.println(" to control/limit what is exported.."); - System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); + System.err + .println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=,, ..."); System.err.println(" -D " + RAW_SCAN + "=true"); System.err.println(" -D " + TableInputFormat.SCAN_ROW_START + "="); System.err.println(" -D " + TableInputFormat.SCAN_ROW_STOP + "="); System.err.println(" -D " + HConstants.HBASE_CLIENT_SCANNER_CACHING + "=100"); System.err.println(" -D " + EXPORT_VISIBILITY_LABELS + "="); System.err.println("For tables with very wide rows consider setting the batch size as below:\n" - + " -D " + EXPORT_BATCHING + "=10\n" - + " -D " + EXPORT_CACHING + "=100"); + + " -D " + EXPORT_BATCHING + "=10\n" + " -D " + EXPORT_CACHING + "=100"); } private static Filter getExportFilter(String[] args) { Filter exportFilter; - String filterCriteria = (args.length > 5) ? args[5]: null; + String filterCriteria = (args.length > 5) ? args[5] : null; if (filterCriteria == null) return null; if (filterCriteria.startsWith("^")) { String regexPattern = filterCriteria.substring(1, filterCriteria.length()); @@ -97,23 +97,24 @@ public static boolean isValidArguements(String[] args) { return args != null && args.length >= 2; } - public static Triple getArgumentsFromCommandLine( - Configuration conf, String[] args) throws IOException { + public static Triple getArgumentsFromCommandLine(Configuration conf, + String[] args) throws IOException { if (!isValidArguements(args)) { return null; } - return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), new Path(args[1])); + return new Triple<>(TableName.valueOf(args[0]), getScanFromCommandLine(conf, args), + new Path(args[1])); } static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOException { Scan s = new Scan(); // Optional arguments. // Set Scan Versions - int versions = args.length > 2? Integer.parseInt(args[2]): 1; + int versions = args.length > 2 ? Integer.parseInt(args[2]) : 1; s.setMaxVersions(versions); // Set Scan Range - long startTime = args.length > 3? Long.parseLong(args[3]): 0L; - long endTime = args.length > 4? Long.parseLong(args[4]): Long.MAX_VALUE; + long startTime = args.length > 3 ? Long.parseLong(args[3]) : 0L; + long endTime = args.length > 4 ? Long.parseLong(args[4]) : Long.MAX_VALUE; s.setTimeRange(startTime, endTime); // Set cache blocks s.setCacheBlocks(false); @@ -134,8 +135,8 @@ static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOE } // Set RowFilter or Prefix Filter if applicable. 
Filter exportFilter = getExportFilter(args); - if (exportFilter!= null) { - LOG.info("Setting Scan Filter for Export."); + if (exportFilter != null) { + LOG.info("Setting Scan Filter for Export."); s.setFilter(exportFilter); } List labels = null; @@ -163,9 +164,8 @@ static Scan getScanFromCommandLine(Configuration conf, String[] args) throws IOE LOG.error("Caching could not be set", e); } } - LOG.info("versions=" + versions + ", starttime=" + startTime - + ", endtime=" + endTime + ", keepDeletedCells=" + raw - + ", visibility labels=" + labels); + LOG.info("versions=" + versions + ", starttime=" + startTime + ", endtime=" + endTime + + ", keepDeletedCells=" + raw + ", visibility labels=" + labels); return s; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java index 1909b2d57b38..61ad4a944714 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/GroupingTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +19,6 @@ import java.io.IOException; import java.util.ArrayList; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.Cell; @@ -31,74 +28,68 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Extract grouping columns from input record. */ @InterfaceAudience.Public -public class GroupingTableMapper -extends TableMapper implements Configurable { +public class GroupingTableMapper extends TableMapper + implements Configurable { /** - * JobConf parameter to specify the columns used to produce the key passed to - * collect from the map phase. + * JobConf parameter to specify the columns used to produce the key passed to collect from the map + * phase. */ - public static final String GROUP_COLUMNS = - "hbase.mapred.groupingtablemap.columns"; + public static final String GROUP_COLUMNS = "hbase.mapred.groupingtablemap.columns"; /** The grouping columns. */ - protected byte [][] columns; + protected byte[][] columns; /** The current configuration. */ private Configuration conf = null; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table to be processed. - * @param scan The scan with the columns etc. - * @param groupColumns A space separated list of columns used to form the - * key used in collect. - * @param mapper The mapper class. - * @param job The current job. + * @param scan The scan with the columns etc. + * @param groupColumns A space separated list of columns used to form the key used in collect. + * @param mapper The mapper class. + * @param job The current job. * @throws IOException When setting up the job fails. 
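// Editor's note, not part of the patch: a hedged sketch of the GroupingTableMapper.initJob()
// call documented above. The table "access_log", the two grouping columns and the job name are
// illustrative assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.GroupingTableMapper;
import org.apache.hadoop.mapreduce.Job;

public class GroupingJobSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "group-by-columns");
    // Each input row is re-keyed by the space separated family:qualifier values listed below;
    // rows missing any of those columns are skipped, as extractKeyValues() documents.
    GroupingTableMapper.initJob("access_log", new Scan(), "info:user info:action",
      GroupingTableMapper.class, job);
    // A reducer and an output format still have to be configured before submitting the job.
  }
}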
*/ @SuppressWarnings("unchecked") public static void initJob(String table, Scan scan, String groupColumns, - Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + Class mapper, Job job) throws IOException { + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); job.getConfiguration().set(GROUP_COLUMNS, groupColumns); } /** - * Extract the grouping columns from value to construct a new key. Pass the - * new key and value to reduce. If any of the grouping columns are not found - * in the value, the record is skipped. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. + * Extract the grouping columns from value to construct a new key. Pass the new key and value to + * reduce. If any of the grouping columns are not found in the value, the record is skipped. + * @param key The current key. + * @param value The current value. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ @Override public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { byte[][] keyVals = extractKeyValues(value); - if(keyVals != null) { + if (keyVals != null) { ImmutableBytesWritable tKey = createGroupKey(keyVals); context.write(tKey, value); } } /** - * Extract columns values from the current record. This method returns - * null if any of the columns are not found. + * Extract columns values from the current record. This method returns null if any of the columns + * are not found. *

    * Override this method if you want to deal with nulls differently. - * - * @param r The current values. + * @param r The current values. * @return Array of byte values. */ protected byte[][] extractKeyValues(Result r) { @@ -106,9 +97,9 @@ protected byte[][] extractKeyValues(Result r) { ArrayList foundList = new ArrayList<>(); int numCols = columns.length; if (numCols > 0) { - for (Cell value: r.listCells()) { - byte [] column = CellUtil.makeColumn(CellUtil.cloneFamily(value), - CellUtil.cloneQualifier(value)); + for (Cell value : r.listCells()) { + byte[] column = + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value)); for (int i = 0; i < numCols; i++) { if (Bytes.equals(column, columns[i])) { foundList.add(CellUtil.cloneValue(value)); @@ -116,7 +107,7 @@ protected byte[][] extractKeyValues(Result r) { } } } - if(foundList.size() == numCols) { + if (foundList.size() == numCols) { keyVals = foundList.toArray(new byte[numCols][]); } } @@ -127,17 +118,16 @@ protected byte[][] extractKeyValues(Result r) { * Create a key by concatenating multiple column values. *

    * Override this function in order to produce different types of keys. - * - * @param vals The current key/values. + * @param vals The current key/values. * @return A key generated by concatenating multiple column values. */ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { - if(vals == null) { + if (vals == null) { return null; } - StringBuilder sb = new StringBuilder(); - for(int i = 0; i < vals.length; i++) { - if(i > 0) { + StringBuilder sb = new StringBuilder(); + for (int i = 0; i < vals.length; i++) { + if (i > 0) { sb.append(" "); } sb.append(Bytes.toString(vals[i])); @@ -147,7 +137,6 @@ protected ImmutableBytesWritable createGroupKey(byte[][] vals) { /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -158,17 +147,15 @@ public Configuration getConf() { /** * Sets the configuration. This is used to set up the grouping details. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] cols = conf.get(GROUP_COLUMNS, "").split(" "); columns = new byte[cols.length][]; - for(int i = 0; i < cols.length; i++) { + for (int i = 0; i < cols.length; i++) { columns[i] = Bytes.toBytes(cols[i]); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java index 03254feec042..24e164c550fd 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,9 +41,8 @@ import org.slf4j.LoggerFactory; /** - * Simple MR input format for HFiles. - * This code was borrowed from Apache Crunch project. - * Updated to the recent version of HBase. + * Simple MR input format for HFiles. This code was borrowed from Apache Crunch project. Updated to + * the recent version of HBase. */ @InterfaceAudience.Private public class HFileInputFormat extends FileInputFormat { @@ -51,9 +50,9 @@ public class HFileInputFormat extends FileInputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileInputFormat.class); /** - * File filter that removes all "hidden" files. This might be something worth removing from - * a more general purpose utility; it accounts for the presence of metadata files created - * in the way we're doing exports. + * File filter that removes all "hidden" files. This might be something worth removing from a more + * general purpose utility; it accounts for the presence of metadata files created in the way + * we're doing exports. 
*/ static final PathFilter HIDDEN_FILE_FILTER = new PathFilter() { @Override @@ -95,7 +94,6 @@ public void initialize(InputSplit split, TaskAttemptContext context) } - @Override public boolean nextKeyValue() throws IOException, InterruptedException { boolean hasNext; @@ -161,8 +159,8 @@ protected List listStatus(JobContext job) throws IOException { } @Override - public RecordReader createRecordReader(InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { return new HFileRecordReader(); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java index a3c3f11c5aa8..30e0ce3fcd91 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HFileOutputFormat2.java @@ -93,18 +93,17 @@ import org.slf4j.LoggerFactory; /** - * Writes HFiles. Passed Cells must arrive in order. - * Writes current time as the sequence id for the file. Sets the major compacted - * attribute on created {@link HFile}s. Calling write(null,null) will forcibly roll - * all HFiles being written. + * Writes HFiles. Passed Cells must arrive in order. Writes current time as the sequence id for the + * file. Sets the major compacted attribute on created {@link HFile}s. Calling write(null,null) will + * forcibly roll all HFiles being written. *

    - * Using this class as part of a MapReduce job is best done - * using {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. + * Using this class as part of a MapReduce job is best done using + * {@link #configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}. */ @InterfaceAudience.Public -public class HFileOutputFormat2 - extends FileOutputFormat { +public class HFileOutputFormat2 extends FileOutputFormat { private static final Logger LOG = LoggerFactory.getLogger(HFileOutputFormat2.class); + static class TableInfo { private TableDescriptor tableDesctiptor; private RegionLocator regionLocator; @@ -118,7 +117,7 @@ public TableInfo(TableDescriptor tableDesctiptor, RegionLocator regionLocator) { * The modification for the returned HTD doesn't affect the inner TD. * @return A clone of inner table descriptor * @deprecated since 2.0.0 and will be removed in 3.0.0. Use {@link #getTableDescriptor()} - * instead. + * instead. * @see #getTableDescriptor() * @see HBASE-18241 */ @@ -148,12 +147,9 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) // These should not be changed by the client. static final String COMPRESSION_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.compression"; - static final String BLOOM_TYPE_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomtype"; - static final String BLOOM_PARAM_FAMILIES_CONF_KEY = - "hbase.hfileoutputformat.families.bloomparam"; - static final String BLOCK_SIZE_FAMILIES_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.blocksize"; + static final String BLOOM_TYPE_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomtype"; + static final String BLOOM_PARAM_FAMILIES_CONF_KEY = "hbase.hfileoutputformat.families.bloomparam"; + static final String BLOCK_SIZE_FAMILIES_CONF_KEY = "hbase.mapreduce.hfileoutputformat.blocksize"; static final String DATABLOCK_ENCODING_FAMILIES_CONF_KEY = "hbase.mapreduce.hfileoutputformat.families.datablock.encoding"; @@ -172,26 +168,24 @@ protected static byte[] combineTableNameSuffix(byte[] tableName, byte[] suffix) public static final String LOCALITY_SENSITIVE_CONF_KEY = "hbase.bulkload.locality.sensitive.enabled"; private static final boolean DEFAULT_LOCALITY_SENSITIVE = true; - static final String OUTPUT_TABLE_NAME_CONF_KEY = - "hbase.mapreduce.hfileoutputformat.table.name"; + static final String OUTPUT_TABLE_NAME_CONF_KEY = "hbase.mapreduce.hfileoutputformat.table.name"; static final String MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY = - "hbase.mapreduce.use.multi.table.hfileoutputformat"; + "hbase.mapreduce.use.multi.table.hfileoutputformat"; - public static final String REMOTE_CLUSTER_CONF_PREFIX = - "hbase.hfileoutputformat.remote.cluster."; + public static final String REMOTE_CLUSTER_CONF_PREFIX = "hbase.hfileoutputformat.remote.cluster."; public static final String REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY = - REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; + REMOTE_CLUSTER_CONF_PREFIX + "zookeeper.quorum"; public static final String REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY = - REMOTE_CLUSTER_CONF_PREFIX + "zookeeper." + HConstants.CLIENT_PORT_STR; + REMOTE_CLUSTER_CONF_PREFIX + "zookeeper." 
+ HConstants.CLIENT_PORT_STR; public static final String REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY = - REMOTE_CLUSTER_CONF_PREFIX + HConstants.ZOOKEEPER_ZNODE_PARENT; + REMOTE_CLUSTER_CONF_PREFIX + HConstants.ZOOKEEPER_ZNODE_PARENT; public static final String STORAGE_POLICY_PROPERTY = HStore.BLOCK_STORAGE_POLICY_KEY; public static final String STORAGE_POLICY_PROPERTY_CF_PREFIX = STORAGE_POLICY_PROPERTY + "."; @Override - public RecordWriter getRecordWriter( - final TaskAttemptContext context) throws IOException, InterruptedException { + public RecordWriter + getRecordWriter(final TaskAttemptContext context) throws IOException, InterruptedException { return createRecordWriter(context, this.getOutputCommitter(context)); } @@ -200,32 +194,32 @@ protected static byte[] getTableNameSuffixedWithFamily(byte[] tableName, byte[] } static RecordWriter createRecordWriter( - final TaskAttemptContext context, final OutputCommitter committer) throws IOException { + final TaskAttemptContext context, final OutputCommitter committer) throws IOException { // Get the path of the temporary output file - final Path outputDir = ((FileOutputCommitter)committer).getWorkPath(); + final Path outputDir = ((FileOutputCommitter) committer).getWorkPath(); final Configuration conf = context.getConfiguration(); final boolean writeMultipleTables = - conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); + conf.getBoolean(MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); final String writeTableNames = conf.get(OUTPUT_TABLE_NAME_CONF_KEY); if (writeTableNames == null || writeTableNames.isEmpty()) { throw new IllegalArgumentException("" + OUTPUT_TABLE_NAME_CONF_KEY + " cannot be empty"); } final FileSystem fs = outputDir.getFileSystem(conf); // These configs. are from hbase-*.xml - final long maxsize = conf.getLong(HConstants.HREGION_MAX_FILESIZE, - HConstants.DEFAULT_MAX_FILE_SIZE); - // Invented config. Add to hbase-*.xml if other than default compression. - final String defaultCompressionStr = conf.get("hfile.compression", - Compression.Algorithm.NONE.getName()); + final long maxsize = + conf.getLong(HConstants.HREGION_MAX_FILESIZE, HConstants.DEFAULT_MAX_FILE_SIZE); + // Invented config. Add to hbase-*.xml if other than default compression. + final String defaultCompressionStr = + conf.get("hfile.compression", Compression.Algorithm.NONE.getName()); final Algorithm defaultCompression = HFileWriterImpl.compressionByName(defaultCompressionStr); String compressionStr = conf.get(COMPRESSION_OVERRIDE_CONF_KEY); - final Algorithm overriddenCompression = compressionStr != null ? - Compression.getCompressionAlgorithmByName(compressionStr): null; - final boolean compactionExclude = conf.getBoolean( - "hbase.mapreduce.hfileoutputformat.compaction.exclude", false); - final Set allTableNames = Arrays.stream(writeTableNames.split( - Bytes.toString(tableSeparator))).collect(Collectors.toSet()); + final Algorithm overriddenCompression = + compressionStr != null ? 
Compression.getCompressionAlgorithmByName(compressionStr) : null; + final boolean compactionExclude = + conf.getBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", false); + final Set allTableNames = Arrays + .stream(writeTableNames.split(Bytes.toString(tableSeparator))).collect(Collectors.toSet()); // create a map from column family to the compression algorithm final Map compressionMap = createFamilyCompressionMap(conf); @@ -234,10 +228,10 @@ static RecordWriter createRecordWrit final Map blockSizeMap = createFamilyBlockSizeMap(conf); String dataBlockEncodingStr = conf.get(DATABLOCK_ENCODING_OVERRIDE_CONF_KEY); - final Map datablockEncodingMap - = createFamilyDataBlockEncodingMap(conf); - final DataBlockEncoding overriddenEncoding = dataBlockEncodingStr != null ? - DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; + final Map datablockEncodingMap = + createFamilyDataBlockEncodingMap(conf); + final DataBlockEncoding overriddenEncoding = + dataBlockEncodingStr != null ? DataBlockEncoding.valueOf(dataBlockEncodingStr) : null; return new RecordWriter() { // Map of families to writers and how much has been output on the writer. @@ -262,8 +256,8 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { tableNameBytes = MultiTableHFileOutputFormat.getTableName(row.get()); tableNameBytes = TableName.valueOf(tableNameBytes).toBytes(); if (!allTableNames.contains(Bytes.toString(tableNameBytes))) { - throw new IllegalArgumentException("TableName " + Bytes.toString(tableNameBytes) + - " not expected"); + throw new IllegalArgumentException( + "TableName " + Bytes.toString(tableNameBytes) + " not expected"); } } else { tableNameBytes = Bytes.toBytes(writeTableNames); @@ -276,9 +270,8 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { if (wl == null) { Path writerPath = null; if (writeMultipleTables) { - writerPath = new Path(outputDir,new Path(tableRelPath, Bytes.toString(family))); - } - else { + writerPath = new Path(outputDir, new Path(tableRelPath, Bytes.toString(family))); + } else { writerPath = new Path(outputDir, Bytes.toString(family)); } fs.mkdirs(writerPath); @@ -287,7 +280,7 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { // This can only happen once a row is finished though if (wl != null && wl.written + length >= maxsize - && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) { + && Bytes.compareTo(this.previousRows.get(family), rowKey) != 0) { rollWriters(wl); } @@ -298,16 +291,18 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { String tableName = Bytes.toString(tableNameBytes); if (tableName != null) { - try (Connection connection = ConnectionFactory.createConnection( - createRemoteClusterConf(conf)); - RegionLocator locator = - connection.getRegionLocator(TableName.valueOf(tableName))) { + try ( + Connection connection = + ConnectionFactory.createConnection(createRemoteClusterConf(conf)); + RegionLocator locator = + connection.getRegionLocator(TableName.valueOf(tableName))) { loc = locator.getRegionLocation(rowKey); } catch (Throwable e) { - LOG.warn("Something wrong locating rowkey {} in {}", - Bytes.toString(rowKey), tableName, e); + LOG.warn("Something wrong locating rowkey {} in {}", Bytes.toString(rowKey), + tableName, e); loc = null; - } } + } + } if (null == loc) { LOG.trace("Failed get of location, use default writer {}", Bytes.toString(rowKey)); @@ -321,8 +316,8 @@ public void write(ImmutableBytesWritable row, V cell) throws IOException { wl = 
getNewWriter(tableNameBytes, family, conf, null); } else { LOG.debug("Use favored nodes writer: {}", initialIsa.getHostString()); - wl = getNewWriter(tableNameBytes, family, conf, new InetSocketAddress[] { initialIsa - }); + wl = getNewWriter(tableNameBytes, family, conf, + new InetSocketAddress[] { initialIsa }); } } } else { @@ -361,8 +356,8 @@ private void rollWriters(WriterLength writerLength) throws IOException { private void closeWriter(WriterLength wl) throws IOException { if (wl.writer != null) { - LOG.info("Writer=" + wl.writer.getPath() + - ((wl.written == 0)? "": ", wrote=" + wl.written)); + LOG.info( + "Writer=" + wl.writer.getPath() + ((wl.written == 0) ? "" : ", wrote=" + wl.written)); close(wl.writer); wl.writer = null; } @@ -384,9 +379,9 @@ private Configuration createRemoteClusterConf(Configuration conf) { for (Entry entry : conf) { String key = entry.getKey(); - if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) || - REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) { + if (REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY.equals(key) + || REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY.equals(key)) { // Handled them above continue; } @@ -406,15 +401,15 @@ private Configuration createRemoteClusterConf(Configuration conf) { * Create a new StoreFile.Writer. * @return A WriterLength, containing a new StoreFile.Writer. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="BX_UNBOXING_IMMEDIATELY_REBOXED", - justification="Not important") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "BX_UNBOXING_IMMEDIATELY_REBOXED", + justification = "Not important") private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration conf, InetSocketAddress[] favoredNodes) throws IOException { byte[] tableAndFamily = getTableNameSuffixedWithFamily(tableName, family); Path familydir = new Path(outputDir, Bytes.toString(family)); if (writeMultipleTables) { familydir = new Path(outputDir, - new Path(getTableRelativePath(tableName), Bytes.toString(family))); + new Path(getTableRelativePath(tableName), Bytes.toString(family))); } WriterLength wl = new WriterLength(); Algorithm compression = overriddenCompression; @@ -432,9 +427,9 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration encoding = encoding == null ? datablockEncodingMap.get(tableAndFamily) : encoding; encoding = encoding == null ? 
DataBlockEncoding.NONE : encoding; HFileContextBuilder contextBuilder = new HFileContextBuilder().withCompression(compression) - .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) - .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) - .withColumnFamily(family).withTableName(tableName); + .withDataBlockEncoding(encoding).withChecksumType(StoreUtils.getChecksumType(conf)) + .withBytesPerCheckSum(StoreUtils.getBytesPerChecksum(conf)).withBlockSize(blockSize) + .withColumnFamily(family).withTableName(tableName); if (HFile.getFormatVersion(conf) >= HFile.MIN_FORMAT_VERSION_WITH_TAGS) { contextBuilder.withIncludesTags(true); @@ -442,13 +437,13 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration HFileContext hFileContext = contextBuilder.build(); if (null == favoredNodes) { - wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).build(); + wl.writer = + new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, fs).withOutputDir(familydir) + .withBloomType(bloomType).withFileContext(hFileContext).build(); } else { wl.writer = new StoreFileWriter.Builder(conf, CacheConfig.DISABLED, new HFileSystem(fs)) - .withOutputDir(familydir).withBloomType(bloomType) - .withFileContext(hFileContext).withFavoredNodes(favoredNodes).build(); + .withOutputDir(familydir).withBloomType(bloomType).withFileContext(hFileContext) + .withFavoredNodes(favoredNodes).build(); } this.writers.put(tableAndFamily, wl); @@ -457,10 +452,8 @@ private WriterLength getNewWriter(byte[] tableName, byte[] family, Configuration private void close(final StoreFileWriter w) throws IOException { if (w != null) { - w.appendFileInfo(BULKLOAD_TIME_KEY, - Bytes.toBytes(EnvironmentEdgeManager.currentTime())); - w.appendFileInfo(BULKLOAD_TASK_KEY, - Bytes.toBytes(context.getTaskAttemptID().toString())); + w.appendFileInfo(BULKLOAD_TIME_KEY, Bytes.toBytes(EnvironmentEdgeManager.currentTime())); + w.appendFileInfo(BULKLOAD_TASK_KEY, Bytes.toBytes(context.getTaskAttemptID().toString())); w.appendFileInfo(MAJOR_COMPACTION_KEY, Bytes.toBytes(true)); w.appendFileInfo(EXCLUDE_FROM_MINOR_COMPACTION_KEY, Bytes.toBytes(compactionExclude)); w.appendTrackedTimestampsToMetadata(); @@ -470,7 +463,7 @@ private void close(final StoreFileWriter w) throws IOException { @Override public void close(TaskAttemptContext c) throws IOException, InterruptedException { - for (WriterLength wl: this.writers.values()) { + for (WriterLength wl : this.writers.values()) { close(wl.writer); } } @@ -486,9 +479,8 @@ static void configureStoragePolicy(final Configuration conf, final FileSystem fs return; } - String policy = - conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), - conf.get(STORAGE_POLICY_PROPERTY)); + String policy = conf.get(STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString(tableAndFamily), + conf.get(STORAGE_POLICY_PROPERTY)); CommonFSUtils.setStoragePolicy(fs, cfPath, policy); } @@ -501,22 +493,20 @@ static class WriterLength { } /** - * Return the start keys of all of the regions in this table, - * as a list of ImmutableBytesWritable. + * Return the start keys of all of the regions in this table, as a list of ImmutableBytesWritable. 
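// Editor's note, not part of the patch: the start-key lookup described above boils down to
// RegionLocator.getStartKeys(); a minimal sketch, assuming a table named "t1".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class StartKeysSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
      RegionLocator locator = connection.getRegionLocator(TableName.valueOf("t1"))) {
      for (byte[] startKey : locator.getStartKeys()) {
        // One entry per region; the first region's start key is the empty byte array.
        System.out.println(Bytes.toStringBinary(startKey));
      }
    }
  }
}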
*/ private static List getRegionStartKeys(List regionLocators, - boolean writeMultipleTables) - throws IOException { + boolean writeMultipleTables) throws IOException { ArrayList ret = new ArrayList<>(); - for(RegionLocator regionLocator : regionLocators) { + for (RegionLocator regionLocator : regionLocators) { TableName tableName = regionLocator.getName(); LOG.info("Looking up current regions for table " + tableName); byte[][] byteKeys = regionLocator.getStartKeys(); for (byte[] byteKey : byteKeys) { - byte[] fullKey = byteKey; //HFileOutputFormat2 use case + byte[] fullKey = byteKey; // HFileOutputFormat2 use case if (writeMultipleTables) { - //MultiTableHFileOutputFormat use case + // MultiTableHFileOutputFormat use case fullKey = combineTableNameSuffix(tableName.getName(), byteKey); } if (LOG.isDebugEnabled()) { @@ -529,8 +519,8 @@ private static List getRegionStartKeys(List - *

- *   <li>Inspects the table to configure a total order partitioner</li>
- *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
- *   <li>Sets the number of reduce tasks to match the current number of regions</li>
- *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
- *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
- *     PutSortReducer)</li>
- *   <li>Sets the HBase cluster key to load region locations for locality-sensitive</li>
+ * <li>Inspects the table to configure a total order partitioner</li>
+ * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+ * <li>Sets the number of reduce tasks to match the current number of regions</li>
+ * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+ * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+ * PutSortReducer)</li>
+ * <li>Sets the HBase cluster key to load region locations for locality-sensitive</li>
  • * * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. @@ -594,15 +582,14 @@ public static void configureIncrementalLoad(Job job, Table table, RegionLocator } /** - * Configure a MapReduce Job to perform an incremental load into the given - * table. This + * Configure a MapReduce Job to perform an incremental load into the given table. This *
      - *
- *   <li>Inspects the table to configure a total order partitioner</li>
- *   <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
- *   <li>Sets the number of reduce tasks to match the current number of regions</li>
- *   <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
- *   <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
- *     PutSortReducer)</li>
+ * <li>Inspects the table to configure a total order partitioner</li>
+ * <li>Uploads the partitions file to the cluster and adds it to the DistributedCache</li>
+ * <li>Sets the number of reduce tasks to match the current number of regions</li>
+ * <li>Sets the output key/value class to match HFileOutputFormat2's requirements</li>
+ * <li>Sets the reducer up to perform the appropriate sorting (either KeyValueSortReducer or
+ * PutSortReducer)</li>
    * The user should be sure to set the map output value class to either KeyValue or Put before * running this function. @@ -644,8 +631,8 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, } conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (conf.getBoolean(LOCALITY_SENSITIVE_CONF_KEY, DEFAULT_LOCALITY_SENSITIVE)) { LOG.info("bulkload locality sensitive enabled"); @@ -656,43 +643,43 @@ static void configureIncrementalLoad(Job job, List multiTableInfo, List regionLocators = new ArrayList<>(multiTableInfo.size()); List tableDescriptors = new ArrayList<>(multiTableInfo.size()); - for(TableInfo tableInfo : multiTableInfo) { + for (TableInfo tableInfo : multiTableInfo) { regionLocators.add(tableInfo.getRegionLocator()); allTableNames.add(tableInfo.getRegionLocator().getName().getNameAsString()); tableDescriptors.add(tableInfo.getTableDescriptor()); } // Record tablenames for creating writer by favored nodes, and decoding compression, // block size and other attributes of columnfamily per table - conf.set(OUTPUT_TABLE_NAME_CONF_KEY, StringUtils.join(allTableNames, Bytes - .toString(tableSeparator))); + conf.set(OUTPUT_TABLE_NAME_CONF_KEY, + StringUtils.join(allTableNames, Bytes.toString(tableSeparator))); List startKeys = - getRegionStartKeys(regionLocators, writeMultipleTables); + getRegionStartKeys(regionLocators, writeMultipleTables); // Use table's region boundaries for TOP split points. - LOG.info("Configuring " + startKeys.size() + " reduce partitions " + - "to match current region count for all tables"); + LOG.info("Configuring " + startKeys.size() + " reduce partitions " + + "to match current region count for all tables"); job.setNumReduceTasks(startKeys.size()); configurePartitioner(job, startKeys, writeMultipleTables); // Set compression algorithms based on column families - conf.set(COMPRESSION_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(compressionDetails, - tableDescriptors)); - conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(blockSizeDetails, - tableDescriptors)); - conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomTypeDetails, - tableDescriptors)); - conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, serializeColumnFamilyAttribute(bloomParamDetails, - tableDescriptors)); + conf.set(COMPRESSION_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(compressionDetails, tableDescriptors)); + conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(blockSizeDetails, tableDescriptors)); + conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomTypeDetails, tableDescriptors)); + conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, + serializeColumnFamilyAttribute(bloomParamDetails, tableDescriptors)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, tableDescriptors)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); LOG.info("Incremental output configured for tables: " + StringUtils.join(allTableNames, ",")); } - public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDescriptor) throws - IOException { + public static void configureIncrementalLoadMap(Job job, 
TableDescriptor tableDescriptor) + throws IOException { Configuration conf = job.getConfiguration(); job.setOutputKeyClass(ImmutableBytesWritable.class); @@ -705,15 +692,15 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes conf.set(OUTPUT_TABLE_NAME_CONF_KEY, tableDescriptor.getTableName().getNameAsString()); // Set compression algorithms based on column families conf.set(COMPRESSION_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(compressionDetails, singleTableDescriptor)); conf.set(BLOCK_SIZE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(blockSizeDetails, singleTableDescriptor)); conf.set(BLOOM_TYPE_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomTypeDetails, singleTableDescriptor)); conf.set(BLOOM_PARAM_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(bloomParamDetails, singleTableDescriptor)); conf.set(DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); + serializeColumnFamilyAttribute(dataBlockEncodingDetails, singleTableDescriptor)); TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.initCredentials(job); @@ -722,21 +709,16 @@ public static void configureIncrementalLoadMap(Job job, TableDescriptor tableDes /** * Configure HBase cluster key for remote cluster to load region location for locality-sensitive - * if it's enabled. - * It's not necessary to call this method explicitly when the cluster key for HBase cluster to be - * used to load region location is configured in the job configuration. - * Call this method when another HBase cluster key is configured in the job configuration. - * For example, you should call when you load data from HBase cluster A using - * {@link TableInputFormat} and generate hfiles for HBase cluster B. - * Otherwise, HFileOutputFormat2 fetch location from cluster A and locality-sensitive won't - * working correctly. + * if it's enabled. It's not necessary to call this method explicitly when the cluster key for + * HBase cluster to be used to load region location is configured in the job configuration. Call + * this method when another HBase cluster key is configured in the job configuration. For example, + * you should call when you load data from HBase cluster A using {@link TableInputFormat} and + * generate hfiles for HBase cluster B. Otherwise, HFileOutputFormat2 fetch location from cluster + * A and locality-sensitive won't working correctly. * {@link #configureIncrementalLoad(Job, Table, RegionLocator)} calls this method using - * {@link Table#getConfiguration} as clusterConf. - * See HBASE-25608. - * + * {@link Table#getConfiguration} as clusterConf. See HBASE-25608. 
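// Editor's note, not part of the patch: a hedged sketch of the cluster A -> cluster B bulk load
// scenario described above. The table name "t1", the output path and the mapper are illustrative
// assumptions; the map output value class is Put, as the configureIncrementalLoad() javadoc
// requires, so PutSortReducer is selected automatically.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class RemoteClusterBulkLoadSketch {

  // Turns each scanned Result into a Put so the sort reducer configured by
  // configureIncrementalLoad() can handle the map output.
  public static class ToPutMapper extends TableMapper<ImmutableBytesWritable, Put> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
      throws IOException, InterruptedException {
      Put put = new Put(key.get());
      for (Cell cell : value.rawCells()) {
        put.add(cell);
      }
      context.write(key, put);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration sourceConf = HBaseConfiguration.create(); // cluster A, read via TableInputFormat
    Configuration destConf = HBaseConfiguration.create();   // cluster B, the bulk load target
    Job job = Job.getInstance(sourceConf, "bulkload-from-A-to-B");
    TableMapReduceUtil.initTableMapperJob("t1", new Scan(), ToPutMapper.class,
      ImmutableBytesWritable.class, Put.class, job);
    try (Connection destConnection = ConnectionFactory.createConnection(destConf);
      Table destTable = destConnection.getTable(TableName.valueOf("t1"));
      RegionLocator destLocator = destConnection.getRegionLocator(TableName.valueOf("t1"))) {
      HFileOutputFormat2.configureIncrementalLoad(job, destTable, destLocator);
      // Point locality-sensitive region lookups at cluster B rather than the job's cluster A conf.
      HFileOutputFormat2.configureRemoteCluster(job, destTable.getConfiguration());
    }
    FileOutputFormat.setOutputPath(job, new Path("/tmp/bulkload-out"));
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}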
* @param job which has configuration to be updated * @param clusterConf which contains cluster key of the HBase cluster to be locality-sensitive - * * @see #configureIncrementalLoad(Job, Table, RegionLocator) * @see #LOCALITY_SENSITIVE_CONF_KEY * @see #REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY @@ -751,31 +733,28 @@ public static void configureRemoteCluster(Job job, Configuration clusterConf) { } final String quorum = clusterConf.get(HConstants.ZOOKEEPER_QUORUM); - final int clientPort = clusterConf.getInt( - HConstants.ZOOKEEPER_CLIENT_PORT, HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); - final String parent = clusterConf.get( - HConstants.ZOOKEEPER_ZNODE_PARENT, HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); + final int clientPort = clusterConf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, + HConstants.DEFAULT_ZOOKEEPER_CLIENT_PORT); + final String parent = clusterConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT, + HConstants.DEFAULT_ZOOKEEPER_ZNODE_PARENT); conf.set(REMOTE_CLUSTER_ZOOKEEPER_QUORUM_CONF_KEY, quorum); conf.setInt(REMOTE_CLUSTER_ZOOKEEPER_CLIENT_PORT_CONF_KEY, clientPort); conf.set(REMOTE_CLUSTER_ZOOKEEPER_ZNODE_PARENT_CONF_KEY, parent); - LOG.info("ZK configs for remote cluster of bulkload is configured: " + - quorum + ":" + clientPort + "/" + parent); + LOG.info("ZK configs for remote cluster of bulkload is configured: " + quorum + ":" + clientPort + + "/" + parent); } /** - * Runs inside the task to deserialize column family to compression algorithm - * map from the configuration. - * + * Runs inside the task to deserialize column family to compression algorithm map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the configured compression algorithm */ @InterfaceAudience.Private - static Map createFamilyCompressionMap(Configuration - conf) { - Map stringMap = createFamilyConfValueMap(conf, - COMPRESSION_FAMILIES_CONF_KEY); + static Map createFamilyCompressionMap(Configuration conf) { + Map stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY); Map compressionMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Algorithm algorithm = HFileWriterImpl.compressionByName(e.getValue()); @@ -785,16 +764,14 @@ static Map createFamilyCompressionMap(Configuration } /** - * Runs inside the task to deserialize column family to bloom filter type - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter type map from the + * configuration. * @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter type */ @InterfaceAudience.Private static Map createFamilyBloomTypeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOOM_TYPE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY); Map bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { BloomType bloomType = BloomType.valueOf(e.getValue()); @@ -804,9 +781,8 @@ static Map createFamilyBloomTypeMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to bloom filter param - * map from the configuration. - * + * Runs inside the task to deserialize column family to bloom filter param map from the + * configuration. 
* @param conf to read the serialized values from * @return a map from column family to the the configured bloom filter param */ @@ -815,18 +791,14 @@ static Map createFamilyBloomParamMap(Configuration conf) { return createFamilyConfValueMap(conf, BLOOM_PARAM_FAMILIES_CONF_KEY); } - /** - * Runs inside the task to deserialize column family to block size - * map from the configuration. - * + * Runs inside the task to deserialize column family to block size map from the configuration. * @param conf to read the serialized values from * @return a map from column family to the configured block size */ @InterfaceAudience.Private static Map createFamilyBlockSizeMap(Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - BLOCK_SIZE_FAMILIES_CONF_KEY); + Map stringMap = createFamilyConfValueMap(conf, BLOCK_SIZE_FAMILIES_CONF_KEY); Map blockSizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { Integer blockSize = Integer.parseInt(e.getValue()); @@ -836,18 +808,16 @@ static Map createFamilyBlockSizeMap(Configuration conf) { } /** - * Runs inside the task to deserialize column family to data block encoding - * type map from the configuration. - * + * Runs inside the task to deserialize column family to data block encoding type map from the + * configuration. * @param conf to read the serialized values from - * @return a map from column family to HFileDataBlockEncoder for the - * configured data block type for the family + * @return a map from column family to HFileDataBlockEncoder for the configured data block type + * for the family */ @InterfaceAudience.Private - static Map createFamilyDataBlockEncodingMap( - Configuration conf) { - Map stringMap = createFamilyConfValueMap(conf, - DATABLOCK_ENCODING_FAMILIES_CONF_KEY); + static Map createFamilyDataBlockEncodingMap(Configuration conf) { + Map stringMap = + createFamilyConfValueMap(conf, DATABLOCK_ENCODING_FAMILIES_CONF_KEY); Map encoderMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); for (Map.Entry e : stringMap.entrySet()) { encoderMap.put(e.getKey(), DataBlockEncoding.valueOf((e.getValue()))); @@ -855,16 +825,13 @@ static Map createFamilyDataBlockEncodingMap( return encoderMap; } - /** * Run inside the task to deserialize column family to given conf value map. - * * @param conf to read the serialized values from * @param confName conf key to read from the configuration * @return a map of column family to the given configuration value */ - private static Map createFamilyConfValueMap( - Configuration conf, String confName) { + private static Map createFamilyConfValueMap(Configuration conf, String confName) { Map confValMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); String confVal = conf.get(confName, ""); for (String familyConf : confVal.split("&")) { @@ -874,7 +841,7 @@ private static Map createFamilyConfValueMap( } try { confValMap.put(Bytes.toBytes(URLDecoder.decode(familySplit[0], "UTF-8")), - URLDecoder.decode(familySplit[1], "UTF-8")); + URLDecoder.decode(familySplit[1], "UTF-8")); } catch (UnsupportedEncodingException e) { // will not happen with UTF-8 encoding throw new AssertionError(e); @@ -887,15 +854,13 @@ private static Map createFamilyConfValueMap( * Configure job with a TotalOrderPartitioner, partitioning against * splitPoints. Cleans up the partitions file after job exists. 
*/ - static void configurePartitioner(Job job, List splitPoints, boolean - writeMultipleTables) - throws IOException { + static void configurePartitioner(Job job, List splitPoints, + boolean writeMultipleTables) throws IOException { Configuration conf = job.getConfiguration(); // create the partitions file FileSystem fs = FileSystem.get(conf); - String hbaseTmpFsDir = - conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, - HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY); + String hbaseTmpFsDir = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY); Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID()); fs.makeQualified(partitionsPath); writePartitions(conf, partitionsPath, splitPoints, writeMultipleTables); @@ -906,12 +871,11 @@ static void configurePartitioner(Job job, List splitPoin TotalOrderPartitioner.setPartitionFile(conf, partitionsPath); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") + @edu.umd.cs.findbugs.annotations.SuppressWarnings( + value = "RCN_REDUNDANT_NULLCHECK_OF_NONNULL_VALUE") @InterfaceAudience.Private static String serializeColumnFamilyAttribute(Function fn, - List allTables) - throws UnsupportedEncodingException { + List allTables) throws UnsupportedEncodingException { StringBuilder attributeValue = new StringBuilder(); int i = 0; for (TableDescriptor tableDescriptor : allTables) { @@ -924,9 +888,9 @@ static String serializeColumnFamilyAttribute(Function 0) { attributeValue.append('&'); } - attributeValue.append(URLEncoder.encode( - Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), - familyDescriptor.getName())), "UTF-8")); + attributeValue.append(URLEncoder + .encode(Bytes.toString(combineTableNameSuffix(tableDescriptor.getTableName().getName(), + familyDescriptor.getName())), "UTF-8")); attributeValue.append('='); attributeValue.append(URLEncoder.encode(fn.apply(familyDescriptor), "UTF-8")); } @@ -936,24 +900,24 @@ static String serializeColumnFamilyAttribute(Function compressionDetails = familyDescriptor -> - familyDescriptor.getCompressionType().getName(); + static Function compressionDetails = + familyDescriptor -> familyDescriptor.getCompressionType().getName(); /** - * Serialize column family to block size map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to block size map to configuration. Invoked while configuring the MR + * job for incremental load. */ @InterfaceAudience.Private - static Function blockSizeDetails = familyDescriptor -> String - .valueOf(familyDescriptor.getBlocksize()); + static Function blockSizeDetails = + familyDescriptor -> String.valueOf(familyDescriptor.getBlocksize()); /** - * Serialize column family to bloom type map to configuration. Invoked while - * configuring the MR job for incremental load. + * Serialize column family to bloom type map to configuration. Invoked while configuring the MR + * job for incremental load. 
*/ @InterfaceAudience.Private static Function bloomTypeDetails = familyDescriptor -> { @@ -965,8 +929,8 @@ static String serializeColumnFamilyAttribute(Function bloomParamDetails = familyDescriptor -> { @@ -979,8 +943,8 @@ static String serializeColumnFamilyAttribute(Function dataBlockEncodingDetails = familyDescriptor -> { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java index 12f2e86fa806..77d1e9afe9c6 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HRegionPartitioner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,9 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; @@ -33,24 +29,25 @@ import org.apache.hadoop.hbase.mapred.TableOutputFormat; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Partitioner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * This is used to partition the output keys into groups of keys. - * Keys are grouped according to the regions that currently exist - * so that each reducer fills a single region so load is distributed. - * - *

    This class is not suitable as partitioner creating hfiles - * for incremental bulk loads as region spread will likely change between time of - * hfile creation and load time. See {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles} - * and Bulk Load.

    - * - * @param The type of the key. - * @param The type of the value. + * This is used to partition the output keys into groups of keys. Keys are grouped according to the + * regions that currently exist so that each reducer fills a single region so load is distributed. + *

    + * This class is not suitable as partitioner creating hfiles for incremental bulk loads as region + * spread will likely change between time of hfile creation and load time. See + * {@link org.apache.hadoop.hbase.tool.LoadIncrementalHFiles} and + * Bulk Load. + *

    + * @param The type of the key. + * @param The type of the value. */ @InterfaceAudience.Public -public class HRegionPartitioner -extends Partitioner -implements Configurable { +public class HRegionPartitioner extends Partitioner + implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(HRegionPartitioner.class); private Configuration conf = null; @@ -60,24 +57,23 @@ public class HRegionPartitioner private byte[][] startKeys; /** - * Gets the partition number for a given key (hence record) given the total - * number of partitions i.e. number of reduce-tasks for the job. - * - *
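A minimal sketch of wiring this partitioner into a reduce-side table write; MyTableReducer and regionCount are hypothetical, and it assumes the four-argument TableMapReduceUtil.initTableReducerJob overload, which installs both the output table and the partitioner:

    Job job = Job.getInstance(HBaseConfiguration.create(), "write-by-region");
    TableMapReduceUtil.initTableReducerJob("myTable", MyTableReducer.class, job,
        HRegionPartitioner.class);
    // One reducer per region keeps the load spread even; with fewer reducers than regions,
    // getPartition() below folds the extra regions onto the available reducers.
    job.setNumReduceTasks(regionCount);   // regionCount obtained elsewhere, e.g. from a RegionLocator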

    Typically a hash function on a all or a subset of the key.

    - * - * @param key The key to be partitioned. - * @param value The entry value. - * @param numPartitions The total number of partitions. + * Gets the partition number for a given key (hence record) given the total number of partitions + * i.e. number of reduce-tasks for the job. + *

    + * Typically a hash function on all or a subset of the key. + *

    + * @param key The key to be partitioned. + * @param value The entry value. + * @param numPartitions The total number of partitions. * @return The partition number for the key. - * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( - * java.lang.Object, java.lang.Object, int) + * @see org.apache.hadoop.mapreduce.Partitioner#getPartition( java.lang.Object, java.lang.Object, + * int) */ @Override - public int getPartition(ImmutableBytesWritable key, - VALUE value, int numPartitions) { + public int getPartition(ImmutableBytesWritable key, VALUE value, int numPartitions) { byte[] region = null; // Only one region return 0 - if (this.startKeys.length == 1){ + if (this.startKeys.length == 1) { return 0; } try { @@ -87,12 +83,11 @@ public int getPartition(ImmutableBytesWritable key, } catch (IOException e) { LOG.error(e.toString(), e); } - for (int i = 0; i < this.startKeys.length; i++){ - if (Bytes.compareTo(region, this.startKeys[i]) == 0 ){ - if (i >= numPartitions){ + for (int i = 0; i < this.startKeys.length; i++) { + if (Bytes.compareTo(region, this.startKeys[i]) == 0) { + if (i >= numPartitions) { // cover if we have less reduces then regions. - return (Integer.toString(i).hashCode() - & Integer.MAX_VALUE) % numPartitions; + return (Integer.toString(i).hashCode() & Integer.MAX_VALUE) % numPartitions; } return i; } @@ -103,7 +98,6 @@ public int getPartition(ImmutableBytesWritable key, /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -113,12 +107,9 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to determine the start keys for the - * given table. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to determine the start keys for the given table. + * @param configuration The configuration to set. + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java index 5ec7c48fb03d..e85a263de72a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/HashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -220,9 +220,9 @@ Scan initScan() throws IOException { } /** - * Choose partitions between row ranges to hash to a single output file - * Selects region boundaries that fall within the scan range, and groups them - * into the desired number of partitions. + * Choose partitions between row ranges to hash to a single output file Selects region + * boundaries that fall within the scan range, and groups them into the desired number of + * partitions. 
*/ void selectPartitions(Pair regionStartEndKeys) { List startKeys = new ArrayList<>(); @@ -232,13 +232,13 @@ void selectPartitions(Pair regionStartEndKeys) { // if scan begins after this region, or starts before this region, then drop this region // in other words: - // IF (scan begins before the end of this region - // AND scan ends before the start of this region) - // THEN include this region + // IF (scan begins before the end of this region + // AND scan ends before the start of this region) + // THEN include this region if ((isTableStartRow(startRow) || isTableEndRow(regionEndKey) || Bytes.compareTo(startRow, regionEndKey) < 0) - && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) - || Bytes.compareTo(stopRow, regionStartKey) > 0)) { + && (isTableEndRow(stopRow) || isTableStartRow(regionStartKey) + || Bytes.compareTo(stopRow, regionStartKey) > 0)) { startKeys.add(regionStartKey); } } @@ -267,8 +267,8 @@ void selectPartitions(Pair regionStartEndKeys) { void writePartitionFile(Configuration conf, Path path) throws IOException { FileSystem fs = path.getFileSystem(conf); @SuppressWarnings("deprecation") - SequenceFile.Writer writer = SequenceFile.createWriter( - fs, conf, path, ImmutableBytesWritable.class, NullWritable.class); + SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, path, + ImmutableBytesWritable.class, NullWritable.class); for (int i = 0; i < partitions.size(); i++) { writer.append(partitions.get(i), NullWritable.get()); @@ -277,7 +277,7 @@ void writePartitionFile(Configuration conf, Path path) throws IOException { } private void readPartitionFile(FileSystem fs, Configuration conf, Path path) - throws IOException { + throws IOException { @SuppressWarnings("deprecation") SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf); ImmutableBytesWritable key = new ImmutableBytesWritable(); @@ -351,15 +351,15 @@ public class Reader implements java.io.Closeable { int partitionIndex = Collections.binarySearch(partitions, startKey); if (partitionIndex >= 0) { // if the key is equal to a partition, then go the file after that partition - hashFileIndex = partitionIndex+1; + hashFileIndex = partitionIndex + 1; } else { // if the key is between partitions, then go to the file between those partitions - hashFileIndex = -1-partitionIndex; + hashFileIndex = -1 - partitionIndex; } openHashFile(); // MapFile's don't make it easy to seek() so that the subsequent next() returns - // the desired key/value pair. So we cache it for the first call of next(). + // the desired key/value pair. So we cache it for the first call of next(). hash = new ImmutableBytesWritable(); key = (ImmutableBytesWritable) mapFileReader.getClosest(startKey, hash); if (key == null) { @@ -371,8 +371,8 @@ public class Reader implements java.io.Closeable { } /** - * Read the next key/hash pair. - * Returns true if such a pair exists and false when at the end of the data. + * Read the next key/hash pair. Returns true if such a pair exists and false when at the end + * of the data. 
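For context, a hedged sketch of how these key/hash pairs are typically walked on the consuming side (this mirrors what SyncTable does; the read/newReader signatures are assumed from the surrounding code):

    Configuration conf = HBaseConfiguration.create();
    HashTable.TableHash tableHash = HashTable.TableHash.read(conf, new Path("/hashes/testTable"));
    try (HashTable.TableHash.Reader reader =
        tableHash.newReader(conf, new ImmutableBytesWritable(HConstants.EMPTY_START_ROW))) {
      while (reader.next()) {
        ImmutableBytesWritable batchStartKey = reader.getCurrentKey();
        ImmutableBytesWritable batchHash = reader.getCurrentHash();
        // compare batchHash against the hash of the same batch computed on the other table
      }
    }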
*/ public boolean next() throws IOException { if (cachedNext) { @@ -443,19 +443,19 @@ public Job createSubmittableJob(String[] args) throws IOException { generatePartitions(partitionsPath); Job job = Job.getInstance(getConf(), - getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); + getConf().get("mapreduce.job.name", "hashTable_" + tableHash.tableName)); Configuration jobConf = job.getConfiguration(); jobConf.setLong(HASH_BATCH_SIZE_CONF_KEY, tableHash.batchSize); jobConf.setBoolean(IGNORE_TIMESTAMPS, tableHash.ignoreTimestamps); job.setJarByClass(HashTable.class); TableMapReduceUtil.initTableMapperJob(tableHash.tableName, tableHash.initScan(), - HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + HashMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); // use a TotalOrderPartitioner and reducers to group region output into hash files job.setPartitionerClass(TotalOrderPartitioner.class); TotalOrderPartitioner.setPartitionFile(jobConf, partitionsPath); - job.setReducerClass(Reducer.class); // identity reducer + job.setReducerClass(Reducer.class); // identity reducer job.setNumReduceTasks(tableHash.numHashFiles); job.setOutputKeyClass(ImmutableBytesWritable.class); job.setOutputValueClass(ImmutableBytesWritable.class); @@ -467,8 +467,8 @@ public Job createSubmittableJob(String[] args) throws IOException { private void generatePartitions(Path partitionsPath) throws IOException { Connection connection = ConnectionFactory.createConnection(getConf()); - Pair regionKeys - = connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); + Pair regionKeys = + connection.getRegionLocator(TableName.valueOf(tableHash.tableName)).getStartEndKeys(); connection.close(); tableHash.selectPartitions(regionKeys); @@ -556,7 +556,7 @@ public long getBatchSize() { } public static class HashMapper - extends TableMapper { + extends TableMapper { private ResultHasher hasher; private long targetBatchSize; @@ -565,11 +565,10 @@ public static class HashMapper @Override protected void setup(Context context) throws IOException, InterruptedException { - targetBatchSize = context.getConfiguration() - .getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); + targetBatchSize = + context.getConfiguration().getLong(HASH_BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); hasher = new ResultHasher(); - hasher.ignoreTimestamps = context.getConfiguration(). 
- getBoolean(IGNORE_TIMESTAMPS, false); + hasher.ignoreTimestamps = context.getConfiguration().getBoolean(IGNORE_TIMESTAMPS, false); TableSplit split = (TableSplit) context.getInputSplit(); hasher.startBatch(new ImmutableBytesWritable(split.getStartRow())); } @@ -612,6 +611,7 @@ private void completeManifest() throws IOException { } private static final int NUM_ARGS = 2; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -646,8 +646,8 @@ private static void printUsage(final String errorMsg) { System.err.println(); System.err.println("Examples:"); System.err.println(" To hash 'TestTable' in 32kB batches for a 1 hour window into 50 files:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" + System.err.println(" $ hbase " + + "org.apache.hadoop.hbase.mapreduce.HashTable --batchsize=32000 --numhashfiles=50" + " --starttime=1265875194289 --endtime=1265878794289 --families=cf2,cf3" + " TestTable /hashes/testTable"); } @@ -659,8 +659,8 @@ private boolean doCommandLine(final String[] args) { } try { - tableHash.tableName = args[args.length-2]; - destPath = new Path(args[args.length-1]); + tableHash.tableName = args[args.length - 2]; + destPath = new Path(args[args.length - 1]); for (int i = 0; i < args.length - NUM_ARGS; i++) { String cmd = args[i]; @@ -731,8 +731,8 @@ private boolean doCommandLine(final String[] args) { final String ignoreTimestampsKey = "--ignoreTimestamps="; if (cmd.startsWith(ignoreTimestampsKey)) { - tableHash.ignoreTimestamps = Boolean. - parseBoolean(cmd.substring(ignoreTimestampsKey.length())); + tableHash.ignoreTimestamps = + Boolean.parseBoolean(cmd.substring(ignoreTimestampsKey.length())); continue; } @@ -741,8 +741,8 @@ private boolean doCommandLine(final String[] args) { } if ((tableHash.startTime != 0 || tableHash.endTime != 0) && (tableHash.startTime >= tableHash.endTime)) { - printUsage("Invalid time range filter: starttime=" - + tableHash.startTime + " >= endtime=" + tableHash.endTime); + printUsage("Invalid time range filter: starttime=" + tableHash.startTime + " >= endtime=" + + tableHash.endTime); return false; } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java index 831607c730c5..1e896d301dfa 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,48 +18,43 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Job; +import org.apache.yetus.audience.InterfaceAudience; /** * Pass the given key and record as-is to the reduce phase. */ @InterfaceAudience.Public -public class IdentityTableMapper -extends TableMapper { +public class IdentityTableMapper extends TableMapper { /** - * Use this before submitting a TableMap job. 
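For example (table name and scan tuning are hypothetical), a pass-through scan job can be set up with:

    Job job = Job.getInstance(HBaseConfiguration.create(), "identity-scan");
    Scan scan = new Scan();
    scan.setCaching(500);        // MR-friendly scan settings
    scan.setCacheBlocks(false);
    IdentityTableMapper.initJob("myTable", scan, IdentityTableMapper.class, job);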
It will appropriately set up - * the job. - * - * @param table The table name. - * @param scan The scan with the columns to scan. - * @param mapper The mapper class. - * @param job The job configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name. + * @param scan The scan with the columns to scan. + * @param mapper The mapper class. + * @param job The job configuration. * @throws IOException When setting up the job fails. */ @SuppressWarnings("rawtypes") - public static void initJob(String table, Scan scan, - Class mapper, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(table, scan, mapper, - ImmutableBytesWritable.class, Result.class, job); + public static void initJob(String table, Scan scan, Class mapper, Job job) + throws IOException { + TableMapReduceUtil.initTableMapperJob(table, scan, mapper, ImmutableBytesWritable.class, + Result.class, job); } /** * Pass the key, value to reduce. - * - * @param key The current key. - * @param value The current value. - * @param context The current context. + * @param key The current key. + * @param value The current value. + * @param context The current context. * @throws IOException When writing the record fails. * @throws InterruptedException When the job is aborted. */ public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { context.write(key, value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java index 876953c862b3..e014f9e0c60c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/IdentityTableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,60 +18,50 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - +import org.apache.hadoop.hbase.client.Mutation; +import org.apache.hadoop.io.Writable; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.Mutation; -import org.apache.hadoop.io.Writable; /** * Convenience class that simply writes all values (which must be - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} instances) - * passed to it out to the configured HBase table. This works in combination - * with {@link TableOutputFormat} which actually does the writing to HBase.

    - * - * Keys are passed along but ignored in TableOutputFormat. However, they can - * be used to control how your values will be divided up amongst the specified - * number of reducers.

    - * - * You can also use the {@link TableMapReduceUtil} class to set up the two - * classes in one step: + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} instances) passed to it out to the configured HBase table. This works in combination with + * {@link TableOutputFormat} which actually does the writing to HBase. + *

    + * Keys are passed along but ignored in TableOutputFormat. However, they can be used to control how + * your values will be divided up amongst the specified number of reducers. + *

    + * You can also use the {@link TableMapReduceUtil} class to set up the two classes in one step: *

    * TableMapReduceUtil.initTableReducerJob("table", IdentityTableReducer.class, job); - *
    - * This will also set the proper {@link TableOutputFormat} which is given the - * table parameter. The - * {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} define the - * row and columns implicitly. + *
    This will also set the proper {@link TableOutputFormat} which is given the + * table parameter. The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} define the row and columns implicitly. */ @InterfaceAudience.Public -public class IdentityTableReducer -extends TableReducer { +public class IdentityTableReducer extends TableReducer { @SuppressWarnings("unused") private static final Logger LOG = LoggerFactory.getLogger(IdentityTableReducer.class); /** - * Writes each given record, consisting of the row key and the given values, - * to the configured {@link org.apache.hadoop.mapreduce.OutputFormat}. - * It is emitting the row key and each {@link org.apache.hadoop.hbase.client.Put Put} - * or {@link org.apache.hadoop.hbase.client.Delete Delete} as separate pairs. - * - * @param key The current row key. - * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or - * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given - * row. - * @param context The context of the reduce. + * Writes each given record, consisting of the row key and the given values, to the configured + * {@link org.apache.hadoop.mapreduce.OutputFormat}. It is emitting the row key and each + * {@link org.apache.hadoop.hbase.client.Put Put} or {@link org.apache.hadoop.hbase.client.Delete + * Delete} as separate pairs. + * @param key The current row key. + * @param values The {@link org.apache.hadoop.hbase.client.Put Put} or + * {@link org.apache.hadoop.hbase.client.Delete Delete} list for the given row. + * @param context The context of the reduce. * @throws IOException When writing the record fails. * @throws InterruptedException When the job gets interrupted. */ @Override public void reduce(Writable key, Iterable values, Context context) - throws IOException, InterruptedException { - for(Mutation putOrDelete : values) { + throws IOException, InterruptedException { + for (Mutation putOrDelete : values) { context.write(key, putOrDelete); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java index edcd3c5e9ea7..a2c040f84f1e 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/Import.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -32,7 +31,6 @@ import java.util.Map; import java.util.TreeMap; import java.util.UUID; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FileSystem; @@ -41,15 +39,12 @@ import org.apache.hadoop.hbase.CellComparator; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; +import org.apache.hadoop.hbase.PrivateCellUtil; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.ZooKeeperConnectionException; -import org.apache.hadoop.hbase.util.MapReduceExtendedCell; -import org.apache.hadoop.hbase.zookeeper.ZKWatcher; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; @@ -63,7 +58,9 @@ import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.MapReduceExtendedCell; import org.apache.hadoop.hbase.zookeeper.ZKClusterId; +import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.io.WritableComparator; @@ -77,11 +74,11 @@ import org.apache.hadoop.mapreduce.lib.partition.TotalOrderPartitioner; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Import data written by {@link Export}. */ @@ -95,16 +92,16 @@ public class Import extends Configured implements Tool { public final static String FILTER_ARGS_CONF_KEY = "import.filter.args"; public final static String TABLE_NAME = "import.table.name"; public final static String WAL_DURABILITY = "import.wal.durability"; - public final static String HAS_LARGE_RESULT= "import.bulk.hasLargeResult"; + public final static String HAS_LARGE_RESULT = "import.bulk.hasLargeResult"; private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; public static class CellWritableComparablePartitioner extends Partitioner { private static CellWritableComparable[] START_KEYS = null; + @Override - public int getPartition(CellWritableComparable key, Cell value, - int numPartitions) { + public int getPartition(CellWritableComparable key, Cell value, int numPartitions) { for (int i = 0; i < START_KEYS.length; ++i) { if (key.compareTo(START_KEYS[i]) <= 0) { return i; @@ -116,8 +113,7 @@ public int getPartition(CellWritableComparable key, Cell value, } /** - * @deprecated Use {@link CellWritableComparablePartitioner}. Will be removed - * from 3.0 onwards + * @deprecated Use {@link CellWritableComparablePartitioner}. 
Will be removed from 3.0 onwards */ @Deprecated public static class KeyValueWritableComparablePartitioner @@ -188,15 +184,13 @@ public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) { } - public static class CellWritableComparable - implements WritableComparable { + public static class CellWritableComparable implements WritableComparable { private Cell kv = null; static { // register this comparator - WritableComparator.define(CellWritableComparable.class, - new CellWritableComparator()); + WritableComparator.define(CellWritableComparable.class, new CellWritableComparator()); } public CellWritableComparable() { @@ -263,21 +257,16 @@ protected void reduce(KeyValueWritableComparable row, Iterable kvs, } public static class CellReducer - extends - Reducer { - protected void reduce( - CellWritableComparable row, - Iterable kvs, - Reducer.Context context) + extends Reducer { + protected void reduce(CellWritableComparable row, Iterable kvs, + Reducer.Context context) throws java.io.IOException, InterruptedException { int index = 0; for (Cell kv : kvs) { context.write(new ImmutableBytesWritable(CellUtil.cloneRow(kv)), new MapReduceExtendedCell(kv)); - if (++index % 100 == 0) - context.setStatus("Wrote " + index + " KeyValues, " - + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); + if (++index % 100 == 0) context.setStatus("Wrote " + index + " KeyValues, " + + "and the rowkey whose is being wrote is " + Bytes.toString(kv.getRowArray())); } } } @@ -392,30 +381,26 @@ public void setup(Context context) { } } - public static class CellSortImporter - extends TableMapper { + public static class CellSortImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), - (short) row.getLength()))) { + if (filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -442,8 +427,7 @@ public void setup(Context context) throws IOException { if (startKeys.length != reduceNum) { throw new IOException("Region split after job initialization"); } - CellWritableComparable[] startKeyWraps = - new CellWritableComparable[startKeys.length - 1]; + CellWritableComparable[] startKeyWraps = new CellWritableComparable[startKeys.length - 1]; for (int i = 1; i < startKeys.length; ++i) { startKeyWraps[i - 1] = new CellWritableComparable(KeyValueUtil.createFirstOnRow(startKeys[i])); @@ -456,31 +440,28 @@ public void setup(Context context) throws IOException { /** * A mapper that just writes out KeyValues. */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="EQ_COMPARETO_USE_OBJECT_EQUALS", - justification="Writables are going away and this has been this way forever") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "EQ_COMPARETO_USE_OBJECT_EQUALS", + justification = "Writables are going away and this has been this way forever") public static class CellImporter extends TableMapper { private Map cfRenameMap; private Filter filter; private static final Logger LOG = LoggerFactory.getLogger(CellImporter.class); /** - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. */ @Override - public void map(ImmutableBytesWritable row, Result value, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result value, Context context) throws IOException { try { if (LOG.isTraceEnabled()) { - LOG.trace("Considering the row." - + Bytes.toString(row.get(), row.getOffset(), row.getLength())); + LOG.trace( + "Considering the row." 
+ Bytes.toString(row.get(), row.getOffset(), row.getLength())); } - if (filter == null - || !filter.filterRowKey(PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), - (short) row.getLength()))) { + if (filter == null || !filter.filterRowKey( + PrivateCellUtil.createFirstOnRow(row.get(), row.getOffset(), (short) row.getLength()))) { for (Cell kv : value.rawCells()) { kv = filterKv(filter, kv); // skip if we filtered it out @@ -510,15 +491,13 @@ public static class Importer extends TableMapper cfRenameMap) { - if(cfRenameMap != null) { + if (cfRenameMap != null) { // If there's a rename mapping for this CF, create a new KeyValue byte[] newCfName = cfRenameMap.get(CellUtil.cloneFamily(kv)); if (newCfName != null) { List tags = PrivateCellUtil.getTags(kv); kv = new KeyValue(kv.getRowArray(), // row buffer - kv.getRowOffset(), // row offset - kv.getRowLength(), // row length - newCfName, // CF buffer - 0, // CF offset - newCfName.length, // CF length - kv.getQualifierArray(), // qualifier buffer - kv.getQualifierOffset(), // qualifier offset - kv.getQualifierLength(), // qualifier length - kv.getTimestamp(), // timestamp + kv.getRowOffset(), // row offset + kv.getRowLength(), // row length + newCfName, // CF buffer + 0, // CF offset + newCfName.length, // CF length + kv.getQualifierArray(), // qualifier buffer + kv.getQualifierOffset(), // qualifier offset + kv.getQualifierLength(), // qualifier length + kv.getTimestamp(), // timestamp KeyValue.Type.codeToType(kv.getTypeByte()), // KV Type - kv.getValueArray(), // value buffer - kv.getValueOffset(), // value offset - kv.getValueLength(), // value length - tags.size() == 0 ? null: tags); + kv.getValueArray(), // value buffer + kv.getValueOffset(), // value offset + kv.getValueLength(), // value length + tags.size() == 0 ? null : tags); } } return kv; @@ -736,16 +714,16 @@ private static Cell convertKv(Cell kv, Map cfRenameMap) { private static Map createCfRenameMap(Configuration conf) { Map cfRenameMap = null; String allMappingsPropVal = conf.get(CF_RENAME_PROP); - if(allMappingsPropVal != null) { + if (allMappingsPropVal != null) { // The conf value format should be sourceCf1:destCf1,sourceCf2:destCf2,... String[] allMappings = allMappingsPropVal.split(","); - for (String mapping: allMappings) { - if(cfRenameMap == null) { - cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); + for (String mapping : allMappings) { + if (cfRenameMap == null) { + cfRenameMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); } - String [] srcAndDest = mapping.split(":"); - if(srcAndDest.length != 2) { - continue; + String[] srcAndDest = mapping.split(":"); + if (srcAndDest.length != 2) { + continue; } cfRenameMap.put(srcAndDest[0].getBytes(), srcAndDest[1].getBytes()); } @@ -754,32 +732,34 @@ private static Map createCfRenameMap(Configuration conf) { } /** - *

    Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells - * the mapper how to rename column families. - * - *

    Alternately, instead of calling this function, you could set the configuration key + *

    + * Sets a configuration property with key {@link #CF_RENAME_PROP} in conf that tells the mapper + * how to rename column families. + *

    + * Alternately, instead of calling this function, you could set the configuration key * {@link #CF_RENAME_PROP} yourself. The value should look like - *

    srcCf1:destCf1,srcCf2:destCf2,....
    . This would have the same effect on - * the mapper behavior. - * - * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be - * set + * + *
    +   * srcCf1:destCf1,srcCf2:destCf2,....
    +   * 
    + * + * . This would have the same effect on the mapper behavior. + * @param conf the Configuration in which the {@link #CF_RENAME_PROP} key will be set * @param renameMap a mapping from source CF names to destination CF names */ - static public void configureCfRenaming(Configuration conf, - Map renameMap) { + static public void configureCfRenaming(Configuration conf, Map renameMap) { StringBuilder sb = new StringBuilder(); - for(Map.Entry entry: renameMap.entrySet()) { + for (Map.Entry entry : renameMap.entrySet()) { String sourceCf = entry.getKey(); String destCf = entry.getValue(); - if(sourceCf.contains(":") || sourceCf.contains(",") || - destCf.contains(":") || destCf.contains(",")) { - throw new IllegalArgumentException("Illegal character in CF names: " - + sourceCf + ", " + destCf); + if (sourceCf.contains(":") || sourceCf.contains(",") || destCf.contains(":") + || destCf.contains(",")) { + throw new IllegalArgumentException( + "Illegal character in CF names: " + sourceCf + ", " + destCf); } - if(sb.length() != 0) { + if (sb.length() != 0) { sb.append(","); } sb.append(sourceCf + ":" + destCf); @@ -806,8 +786,7 @@ public static void addFilterAndArguments(Configuration conf, Class 0) { @@ -907,11 +885,10 @@ private static void usage(final String errorMsg) { System.err.println(" -D " + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import"); System.err.println("For performance consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false\n" + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false\n" + " -D" + WAL_DURABILITY + "="); + + " Allowed values are the supported durability values" + + " like SKIP_WAL/ASYNC_WAL/SYNC_WAL/...>"); } /** @@ -920,8 +897,8 @@ private static void usage(final String errorMsg) { * present in the Write Ahead Log to replay in scenarios of a crash. 
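Two hedged sketches for the public hooks in this hunk; all names and paths are hypothetical. First, the column-family renaming helper documented above:

    Configuration conf = HBaseConfiguration.create();
    Map<String, String> renames = new HashMap<>();
    renames.put("oldFamily", "newFamily");      // rename while importing
    Import.configureCfRenaming(conf, renames);  // same effect as setting CF_RENAME_PROP by hand

Second, the SKIP_WAL path whose flush behaviour the surrounding javadoc describes; Import#run already invokes flushRegionsIfNecessary on success, so no extra flush call is needed:

    conf.set(Import.WAL_DURABILITY, Durability.SKIP_WAL.name());  // faster import, edits bypass the WAL
    int rc = ToolRunner.run(conf, new Import(), new String[] { "myTable", "/export/dir" });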
This method flushes all the * regions of the table in the scenarios of import data to hbase with {@link Durability#SKIP_WAL} */ - public static void flushRegionsIfNecessary(Configuration conf) throws IOException, - InterruptedException { + public static void flushRegionsIfNecessary(Configuration conf) + throws IOException, InterruptedException { String tableName = conf.get(TABLE_NAME); Admin hAdmin = null; Connection connection = null; @@ -957,7 +934,7 @@ public int run(String[] args) throws Exception { } Job job = createSubmittableJob(getConf(), args); boolean isJobSuccessful = job.waitForCompletion(true); - if(isJobSuccessful){ + if (isJobSuccessful) { // Flush all the regions of the table flushRegionsIfNecessary(getConf()); } @@ -966,8 +943,8 @@ public int run(String[] args) throws Exception { if (outputRecords < inputRecords) { System.err.println("Warning, not all records were imported (maybe filtered out)."); if (outputRecords == 0) { - System.err.println("If the data was exported from HBase 0.94 "+ - "consider using -Dhbase.import.version=0.94."); + System.err.println("If the data was exported from HBase 0.94 " + + "consider using -Dhbase.import.version=0.94."); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java index ba0be03b8c06..6c316a31d08d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ImportTsv.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +25,6 @@ import java.util.Base64; import java.util.HashSet; import java.util.Set; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -38,15 +36,14 @@ import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotEnabledException; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Table; +import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; @@ -60,19 +57,17 @@ import org.apache.hadoop.security.Credentials; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; -import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; import org.apache.hbase.thirdparty.com.google.common.base.Splitter; import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * Tool to import data from a TSV file. 
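A hedged sketch of driving the tool programmatically (table, columns and paths are hypothetical; on the command line the same keys are passed as -D options, as the usage text below spells out):

    Configuration conf = HBaseConfiguration.create();
    conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,cf:qual1,cf:qual2");
    conf.set(ImportTsv.SEPARATOR_CONF_KEY, "|");              // optional: pipes instead of tabs
    conf.set(ImportTsv.BULK_OUTPUT_CONF_KEY, "/tmp/hfiles");  // optional: emit HFiles instead of live Puts
    int exitCode = ToolRunner.run(conf, new ImportTsv(), new String[] { "myTable", "/input/tsv" });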
- * - * This tool is rather simplistic - it doesn't do any quoting or - * escaping, but is useful for many data loads. - * + * Tool to import data from a TSV file. This tool is rather simplistic - it doesn't do any quoting + * or escaping, but is useful for many data loads. * @see ImportTsv#usage(String) */ @InterfaceAudience.Public @@ -96,8 +91,8 @@ public class ImportTsv extends Configured implements Tool { public final static String COLUMNS_CONF_KEY = "importtsv.columns"; public final static String SEPARATOR_CONF_KEY = "importtsv.separator"; public final static String ATTRIBUTE_SEPERATOR_CONF_KEY = "attributes.seperator"; - //This config is used to propagate credentials from parent MR jobs which launch - //ImportTSV jobs. SEE IntegrationTestImportTsv. + // This config is used to propagate credentials from parent MR jobs which launch + // ImportTSV jobs. SEE IntegrationTestImportTsv. public final static String CREDENTIALS_LOCATION = "credentials_location"; final static String DEFAULT_SEPARATOR = "\t"; final static String DEFAULT_ATTRIBUTES_SEPERATOR = "=>"; @@ -106,8 +101,8 @@ public class ImportTsv extends Configured implements Tool { public final static String CREATE_TABLE_CONF_KEY = "create.table"; public final static String NO_STRICT_COL_FAMILY = "no.strict"; /** - * If table didn't exist and was created in dry-run mode, this flag is - * flipped to delete it when MR ends. + * If table didn't exist and was created in dry-run mode, this flag is flipped to delete it when + * MR ends. */ private static boolean DRY_RUN_TABLE_CREATED; @@ -152,8 +147,8 @@ public static class TsvParser { private int cellTTLColumnIndex = DEFAULT_CELL_TTL_COLUMN_INDEX; /** - * @param columnsSpecification the list of columns to parser out, comma separated. - * The row key should be the special token TsvParser.ROWKEY_COLUMN_SPEC + * @param columnsSpecification the list of columns to parser out, comma separated. 
The row key + * should be the special token TsvParser.ROWKEY_COLUMN_SPEC * @param separatorStr */ public TsvParser(String columnsSpecification, String separatorStr) { @@ -164,8 +159,8 @@ public TsvParser(String columnsSpecification, String separatorStr) { separatorByte = separator[0]; // Configure columns - ArrayList columnStrings = Lists.newArrayList( - Splitter.on(',').trimResults().split(columnsSpecification)); + ArrayList columnStrings = + Lists.newArrayList(Splitter.on(',').trimResults().split(columnsSpecification)); maxColumnCount = columnStrings.size(); families = new byte[maxColumnCount][]; @@ -243,12 +238,12 @@ public int getRowKeyColumnIndex() { public byte[] getFamily(int idx) { return families[idx]; } + public byte[] getQualifier(int idx) { return qualifiers[idx]; } - public ParsedLine parse(byte[] lineBytes, int length) - throws BadTsvLineException { + public ParsedLine parse(byte[] lineBytes, int length) throws BadTsvLineException { // Enumerate separator offsets ArrayList tabOffsets = new ArrayList<>(maxColumnCount); for (int i = 0; i < length; i++) { @@ -266,8 +261,7 @@ public ParsedLine parse(byte[] lineBytes, int length) throw new BadTsvLineException("Excessive columns"); } else if (tabOffsets.size() <= getRowKeyColumnIndex()) { throw new BadTsvLineException("No row key"); - } else if (hasTimestamp() - && tabOffsets.size() <= getTimestampKeyColumnIndex()) { + } else if (hasTimestamp() && tabOffsets.size() <= getTimestampKeyColumnIndex()) { throw new BadTsvLineException("No timestamp"); } else if (hasAttributes() && tabOffsets.size() <= getAttributesKeyColumnIndex()) { throw new BadTsvLineException("No attributes specified"); @@ -291,6 +285,7 @@ class ParsedLine { public int getRowKeyOffset() { return getColumnOffset(rowKeyColumnIndex); } + public int getRowKeyLength() { return getColumnLength(rowKeyColumnIndex); } @@ -301,9 +296,8 @@ public long getTimestamp(long ts) throws BadTsvLineException { return ts; } - String timeStampStr = Bytes.toString(lineBytes, - getColumnOffset(timestampKeyColumnIndex), - getColumnLength(timestampKeyColumnIndex)); + String timeStampStr = Bytes.toString(lineBytes, getColumnOffset(timestampKeyColumnIndex), + getColumnLength(timestampKeyColumnIndex)); try { return Long.parseLong(timeStampStr); } catch (NumberFormatException nfe) { @@ -317,7 +311,7 @@ private String getAttributes() { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(attrKeyColumnIndex), - getColumnLength(attrKeyColumnIndex)); + getColumnLength(attrKeyColumnIndex)); } } @@ -367,7 +361,7 @@ public String getCellVisibility() { return null; } else { return Bytes.toString(lineBytes, getColumnOffset(cellVisibilityColumnIndex), - getColumnLength(cellVisibilityColumnIndex)); + getColumnLength(cellVisibilityColumnIndex)); } } @@ -392,22 +386,23 @@ public long getCellTTL() { return 0; } else { return Bytes.toLong(lineBytes, getColumnOffset(cellTTLColumnIndex), - getColumnLength(cellTTLColumnIndex)); + getColumnLength(cellTTLColumnIndex)); } } public int getColumnOffset(int idx) { - if (idx > 0) - return tabOffsets.get(idx - 1) + 1; - else - return 0; + if (idx > 0) return tabOffsets.get(idx - 1) + 1; + else return 0; } + public int getColumnLength(int idx) { return tabOffsets.get(idx) - getColumnOffset(idx); } + public int getColumnCount() { return tabOffsets.size(); } + public byte[] getLineBytes() { return lineBytes; } @@ -417,6 +412,7 @@ public static class BadTsvLineException extends Exception { public BadTsvLineException(String err) { super(err); } + private 
static final long serialVersionUID = 1L; } @@ -444,9 +440,8 @@ public Pair parseRowKey(byte[] lineBytes, int length) } } if (i == length) { - throw new BadTsvLineException( - "Row key does not exist as number of columns in the line" - + " are less than row key position."); + throw new BadTsvLineException("Row key does not exist as number of columns in the line" + + " are less than row key position."); } } return new Pair<>(startPos, endPos - startPos + 1); @@ -455,9 +450,8 @@ public Pair parseRowKey(byte[] lineBytes, int length) /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -472,16 +466,17 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) String actualSeparator = conf.get(SEPARATOR_CONF_KEY); if (actualSeparator != null) { conf.set(SEPARATOR_CONF_KEY, - Bytes.toString(Base64.getEncoder().encode(actualSeparator.getBytes()))); + Bytes.toString(Base64.getEncoder().encode(actualSeparator.getBytes()))); } // See if a non-default Mapper was set String mapperClassName = conf.get(MAPPER_CONF_KEY); - Class mapperClass = mapperClassName != null? Class.forName(mapperClassName): DEFAULT_MAPPER; + Class mapperClass = + mapperClassName != null ? Class.forName(mapperClassName) : DEFAULT_MAPPER; TableName tableName = TableName.valueOf(args[0]); Path inputDir = new Path(args[1]); - String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); + String jobName = conf.get(JOB_NAME_CONF_KEY, NAME + "_" + tableName.getNameAsString()); job = Job.getInstance(conf, jobName); job.setJarByClass(mapperClass); FileInputFormat.setInputPaths(job, inputDir); @@ -490,7 +485,7 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) job.setMapOutputKeyClass(ImmutableBytesWritable.class); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); String[] columns = conf.getStrings(COLUMNS_CONF_KEY); - if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { + if (StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { String fileLoc = conf.get(CREDENTIALS_LOCATION); Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); job.getCredentials().addAll(cred); @@ -510,9 +505,8 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) } } } else { - String errorMsg = - format("Table '%s' does not exist and '%s' is set to no.", tableName, - CREATE_TABLE_CONF_KEY); + String errorMsg = format("Table '%s' does not exist and '%s' is set to no.", + tableName, CREATE_TABLE_CONF_KEY); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } @@ -521,26 +515,24 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) RegionLocator regionLocator = connection.getRegionLocator(tableName)) { boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false); // if no.strict is false then check column family - if(!noStrict) { + if (!noStrict) { ArrayList unmatchedFamilies = new ArrayList<>(); Set cfSet = getColumnFamilies(columns); TableDescriptor tDesc = table.getDescriptor(); for (String cf : cfSet) { - if(!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { + if (!tDesc.hasColumnFamily(Bytes.toBytes(cf))) { unmatchedFamilies.add(cf); } } - if(unmatchedFamilies.size() > 0) { + if (unmatchedFamilies.size() > 0) { ArrayList familyNames = new 
ArrayList<>(); for (ColumnFamilyDescriptor family : table.getDescriptor().getColumnFamilies()) { familyNames.add(family.getNameAsString()); } - String msg = - "Column Families " + unmatchedFamilies + " specified in " + COLUMNS_CONF_KEY - + " does not match with any of the table " + tableName + String msg = "Column Families " + unmatchedFamilies + " specified in " + + COLUMNS_CONF_KEY + " does not match with any of the table " + tableName + " column families " + familyNames + ".\n" - + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY - + "=true.\n"; + + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY + "=true.\n"; usage(msg); System.exit(-1); } @@ -557,7 +549,7 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), - regionLocator); + regionLocator); } } } else { @@ -583,13 +575,15 @@ protected static Job createSubmittableJob(Configuration conf, String[] args) if (isDryRun) { job.setOutputFormatClass(NullOutputFormat.class); job.getConfiguration().setStrings("io.serializations", - job.getConfiguration().get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + job.getConfiguration().get("io.serializations"), MutationSerialization.class.getName(), + ResultSerialization.class.getName(), CellSerialization.class.getName()); } TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Function.class /* Guava used by TsvParser */); + org.apache.hbase.thirdparty.com.google.common.base.Function.class /* + * Guava used by + * TsvParser + */); } } return job; @@ -603,15 +597,15 @@ private static void createTable(Admin admin, TableName tableName, String[] colum HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(cf)); htd.addFamily(hcd); } - LOG.warn(format("Creating table '%s' with '%s' columns and default descriptors.", - tableName, cfSet)); + LOG.warn( + format("Creating table '%s' with '%s' columns and default descriptors.", tableName, cfSet)); admin.createTable(htd); } private static void deleteTable(Configuration conf, String[] args) { TableName tableName = TableName.valueOf(args[0]); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { try { admin.disableTable(tableName); } catch (TableNotEnabledException e) { @@ -619,8 +613,7 @@ private static void deleteTable(Configuration conf, String[] args) { } admin.deleteTable(tableName); } catch (IOException e) { - LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, - e.toString())); + LOG.error(format("***Dry run: Failed to delete table '%s'.***%n%s", tableName, e.toString())); return; } LOG.info(format("Dry run: Deleted table '%s'.", tableName)); @@ -642,64 +635,57 @@ private static Set getColumnFamilies(String[] columns) { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. 
*/ private static void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); } - String usage = - "Usage: " + NAME + " -D"+ COLUMNS_CONF_KEY + "=a,b,c \n" + - "\n" + - "Imports the given input directory of TSV data into the specified table.\n" + - "\n" + - "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + "\n" + - "option. This option takes the form of comma-separated column names, where each\n" + - "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + - "column name " + TsvParser.ROWKEY_COLUMN_SPEC + " is used to designate that this column should be used\n" + - "as the row key for each imported record. You must specify exactly one column\n" + - "to be the row key, and you must specify a column name for every column that exists in the\n" + - "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + - " designates that this column should be\n" + - "used as timestamp for each record. Unlike " + TsvParser.ROWKEY_COLUMN_SPEC + ", " + - TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + "\n" + - "You must specify at most one column as timestamp key for each imported record.\n" + - "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + - "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + "' option will be ignored.\n" + - "\n" + - "Other special columns that can be specified are " + TsvParser.CELL_TTL_COLUMN_SPEC + - " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + - TsvParser.CELL_TTL_COLUMN_SPEC + " designates that this column will be used " + - "as a Cell's Time To Live (TTL) attribute.\n" + - TsvParser.CELL_VISIBILITY_COLUMN_SPEC + " designates that this column contains the " + - "visibility label expression.\n" + - "\n" + - TsvParser.ATTRIBUTES_COLUMN_SPEC+" can be used to specify Operation Attributes per record.\n"+ - " Should be specified as key=>value where "+TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX+ " is used \n"+ - " as the seperator. Note that more than one OperationAttributes can be specified.\n"+ - "By default importtsv will load data directly into HBase. To instead generate\n" + - "HFiles of data to prepare for a bulk data load, pass the option:\n" + - " -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + - " Note: if you do not use this option, then the target table must already exist in HBase\n" + - "\n" + - "Other options that may be specified with -D include:\n" + - " -D" + DRY_RUN_CONF_KEY + "=true - Dry run mode. Data is not actually populated into" + - " table. 
If table does not exist, it is created but deleted in the end.\n" + - " -D" + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + - " -D" + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + - " -D" + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + - " '-D" + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + - " -D" + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + - " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + - DEFAULT_MAPPER.getName() + "\n" + - " -D" + JOB_NAME_CONF_KEY + "=jobName - use the specified mapreduce job name for the import\n" + - " -D" + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + - " Note: if you set this to 'no', then the target table must already exist in HBase\n" + - " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + - "Default is false\n\n" + - "For performance consider the following options:\n" + - " -Dmapreduce.map.speculative=false\n" + - " -Dmapreduce.reduce.speculative=false"; + String usage = "Usage: " + NAME + " -D" + COLUMNS_CONF_KEY + "=a,b,c \n" + + "\n" + "Imports the given input directory of TSV data into the specified table.\n" + "\n" + + "The column names of the TSV data must be specified using the -D" + COLUMNS_CONF_KEY + + "\n" + "option. This option takes the form of comma-separated column names, where each\n" + + "column name is either a simple column family, or a columnfamily:qualifier. The special\n" + + "column name " + TsvParser.ROWKEY_COLUMN_SPEC + + " is used to designate that this column should be used\n" + + "as the row key for each imported record. You must specify exactly one column\n" + + "to be the row key, and you must specify a column name for every column that exists in the\n" + + "input data. Another special column" + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + + " designates that this column should be\n" + "used as timestamp for each record. Unlike " + + TsvParser.ROWKEY_COLUMN_SPEC + ", " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC + " is optional." + + "\n" + "You must specify at most one column as timestamp key for each imported record.\n" + + "Record with invalid timestamps (blank, non-numeric) will be treated as bad record.\n" + + "Note: if you use this option, then '" + TIMESTAMP_CONF_KEY + + "' option will be ignored.\n" + "\n" + "Other special columns that can be specified are " + + TsvParser.CELL_TTL_COLUMN_SPEC + " and " + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + ".\n" + + TsvParser.CELL_TTL_COLUMN_SPEC + " designates that this column will be used " + + "as a Cell's Time To Live (TTL) attribute.\n" + TsvParser.CELL_VISIBILITY_COLUMN_SPEC + + " designates that this column contains the " + "visibility label expression.\n" + "\n" + + TsvParser.ATTRIBUTES_COLUMN_SPEC + + " can be used to specify Operation Attributes per record.\n" + + " Should be specified as key=>value where " + TsvParser.DEFAULT_ATTRIBUTES_COLUMN_INDEX + + " is used \n" + + " as the seperator. Note that more than one OperationAttributes can be specified.\n" + + "By default importtsv will load data directly into HBase. 
To instead generate\n" + + "HFiles of data to prepare for a bulk data load, pass the option:\n" + " -D" + + BULK_OUTPUT_CONF_KEY + "=/path/for/output\n" + + " Note: if you do not use this option, then the target table must already exist in HBase\n" + + "\n" + "Other options that may be specified with -D include:\n" + " -D" + + DRY_RUN_CONF_KEY + "=true - Dry run mode. Data is not actually populated into" + + " table. If table does not exist, it is created but deleted in the end.\n" + " -D" + + SKIP_LINES_CONF_KEY + "=false - fail if encountering an invalid line\n" + " -D" + + LOG_BAD_LINES_CONF_KEY + "=true - logs invalid lines to stderr\n" + " -D" + + SKIP_EMPTY_COLUMNS + "=false - If true then skip empty columns in bulk import\n" + " '-D" + + SEPARATOR_CONF_KEY + "=|' - eg separate on pipes instead of tabs\n" + " -D" + + TIMESTAMP_CONF_KEY + "=currentTimeAsLong - use the specified timestamp for the import\n" + + " -D" + MAPPER_CONF_KEY + "=my.Mapper - A user-defined Mapper to use instead of " + + DEFAULT_MAPPER.getName() + "\n" + " -D" + JOB_NAME_CONF_KEY + + "=jobName - use the specified mapreduce job name for the import\n" + " -D" + + CREATE_TABLE_CONF_KEY + "=no - can be used to avoid creation of table by this tool\n" + + " Note: if you set this to 'no', then the target table must already exist in HBase\n" + + " -D" + NO_STRICT_COL_FAMILY + "=true - ignore column family check in hbase table. " + + "Default is false\n\n" + "For performance consider the following options:\n" + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"; System.err.println(usage); } @@ -719,8 +705,7 @@ public int run(String[] args) throws Exception { // Make sure columns are specified String[] columns = getConf().getStrings(COLUMNS_CONF_KEY); if (columns == null) { - usage("No columns specified. Please specify with -D" + - COLUMNS_CONF_KEY+"=..."); + usage("No columns specified. 
Please specify with -D" + COLUMNS_CONF_KEY + "=..."); return -1; } @@ -737,30 +722,27 @@ public int run(String[] args) throws Exception { // Make sure we have at most one column as the timestamp key int tskeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) - tskeysFound++; + if (col.equals(TsvParser.TIMESTAMPKEY_COLUMN_SPEC)) tskeysFound++; } if (tskeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.TIMESTAMPKEY_COLUMN_SPEC); return -1; } int attrKeysFound = 0; for (String col : columns) { - if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) - attrKeysFound++; + if (col.equals(TsvParser.ATTRIBUTES_COLUMN_SPEC)) attrKeysFound++; } if (attrKeysFound > 1) { - usage("Must specify at most one column as " - + TsvParser.ATTRIBUTES_COLUMN_SPEC); + usage("Must specify at most one column as " + TsvParser.ATTRIBUTES_COLUMN_SPEC); return -1; } // Make sure one or more columns are specified excluding rowkey and // timestamp key if (columns.length - (rowkeysFound + tskeysFound + attrKeysFound) < 1) { - usage("One or more columns in addition to the row key and timestamp(optional) are required"); + usage( + "One or more columns in addition to the row key and timestamp(optional) are required"); return -1; } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java index 0127b51ab3fe..91a3a9175b5f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/JarFinder.java @@ -7,14 +7,13 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.mapreduce; @@ -38,18 +37,16 @@ import org.apache.hbase.thirdparty.com.google.common.base.Preconditions; /** - * Finds the Jar for a class. If the class is in a directory in the - * classpath, it creates a Jar on the fly with the contents of the directory - * and returns the path to that Jar. If a Jar is created, it is created in - * the system temporary directory. - * - * This file was forked from hadoop/common/branches/branch-2@1377176. + * Finds the Jar for a class. If the class is in a directory in the classpath, it creates a Jar on + * the fly with the contents of the directory and returns the path to that Jar. If a Jar is created, + * it is created in the system temporary directory. This file was forked from + * hadoop/common/branches/branch-2@1377176. 
*/ @InterfaceAudience.Private public final class JarFinder { - private static void copyToZipStream(File file, ZipEntry entry, - ZipOutputStream zos) throws IOException { + private static void copyToZipStream(File file, ZipEntry entry, ZipOutputStream zos) + throws IOException { InputStream is = new FileInputStream(file); try { zos.putNextEntry(entry); @@ -68,8 +65,7 @@ private static void copyToZipStream(File file, ZipEntry entry, } } - public static void jarDir(File dir, String relativePath, ZipOutputStream zos) - throws IOException { + public static void jarDir(File dir, String relativePath, ZipOutputStream zos) throws IOException { Preconditions.checkNotNull(relativePath, "relativePath"); Preconditions.checkNotNull(zos, "zos"); @@ -89,8 +85,8 @@ public static void jarDir(File dir, String relativePath, ZipOutputStream zos) zos.close(); } - private static void zipDir(File dir, String relativePath, ZipOutputStream zos, - boolean start) throws IOException { + private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start) + throws IOException { String[] dirList = dir.list(); if (dirList == null) { return; @@ -107,8 +103,7 @@ private static void zipDir(File dir, String relativePath, ZipOutputStream zos, String filePath = f.getPath(); File file = new File(filePath); zipDir(file, relativePath + f.getName() + "/", zos, false); - } - else { + } else { String path = relativePath + f.getName(); if (!path.equals(JarFile.MANIFEST_NAME)) { ZipEntry anEntry = new ZipEntry(path); @@ -125,22 +120,18 @@ private static void createJar(File dir, File jarFile) throws IOException { File jarDir = jarFile.getParentFile(); if (!jarDir.exists()) { if (!jarDir.mkdirs()) { - throw new IOException(MessageFormat.format("could not create dir [{0}]", - jarDir)); + throw new IOException(MessageFormat.format("could not create dir [{0}]", jarDir)); } } try (FileOutputStream fos = new FileOutputStream(jarFile); - JarOutputStream jos = new JarOutputStream(fos)) { + JarOutputStream jos = new JarOutputStream(fos)) { jarDir(dir, "", jos); } } /** - * Returns the full path to the Jar containing the class. It always return a - * JAR. - * + * Returns the full path to the Jar containing the class. It always return a JAR. * @param klass class. - * * @return path to the Jar containing the class. 
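A minimal, hypothetical use of the getJar() behaviour described above (any class on the job classpath could be passed; ImportTsv is used here only as an example):

import org.apache.hadoop.hbase.mapreduce.ImportTsv;
import org.apache.hadoop.hbase.mapreduce.JarFinder;

public class JarFinderExample {
  public static void main(String[] args) {
    // Returns the jar that already contains the class, or packs the enclosing
    // classpath directory into a temporary jar and returns that path instead.
    String jarPath = JarFinder.getJar(ImportTsv.class);
    System.out.println("Jar for ImportTsv: " + jarPath);
  }
}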
*/ public static String getJar(Class klass) { @@ -149,8 +140,7 @@ public static String getJar(Class klass) { if (loader != null) { String class_file = klass.getName().replaceAll("\\.", "/") + ".class"; try { - for (Enumeration itr = loader.getResources(class_file); - itr.hasMoreElements(); ) { + for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) { URL url = (URL) itr.nextElement(); String path = url.getPath(); if (path.startsWith("file:")) { @@ -160,8 +150,7 @@ public static String getJar(Class klass) { if ("jar".equals(url.getProtocol())) { path = URLDecoder.decode(path, "UTF-8"); return path.replaceAll("!.*$", ""); - } - else if ("file".equals(url.getProtocol())) { + } else if ("file".equals(url.getProtocol())) { String klassName = klass.getName(); klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); @@ -178,13 +167,13 @@ else if ("file".equals(url.getProtocol())) { return tempJar.getAbsolutePath(); } } - } - catch (IOException e) { + } catch (IOException e) { throw new RuntimeException(e); } } return null; } - private JarFinder() {} + private JarFinder() { + } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java index 3207712f72b3..ad01f8341126 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,18 +22,16 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; + /** - * Use to specify the type of serialization for the mappers - * and reducers - * @deprecated Use {@link CellSerialization}. Will be - * removed from 3.0 onwards + * Use to specify the type of serialization for the mappers and reducers + * @deprecated Use {@link CellSerialization}. Will be removed from 3.0 onwards */ @Deprecated @InterfaceAudience.Public @@ -63,7 +61,7 @@ public void close() throws IOException { @Override public KeyValue deserialize(KeyValue ignore) throws IOException { - // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO + // I can't overwrite the passed in KV, not from a proto kv, not just yet. TODO return KeyValueUtil.create(this.dis); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java index 2fb1e7aad1c1..1a5d361fac85 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/KeyValueSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,21 +18,17 @@ package org.apache.hadoop.hbase.mapreduce; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CellComparatorImpl; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted KeyValues. - * Reads in all KeyValues from passed Iterator, sorts them, then emits - * KeyValues in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted KeyValues. Reads in all KeyValues from passed Iterator, sorts them, then emits + * KeyValues in sorted order. If lots of columns per row, it will use lots of memory sorting. * @see HFileOutputFormat2 - * @deprecated Use {@link CellSortReducer}. Will be removed from - * 3.0 onwards + * @deprecated Use {@link CellSortReducer}. Will be removed from 3.0 onwards */ @Deprecated @InterfaceAudience.Public @@ -41,9 +36,9 @@ public class KeyValueSortReducer extends Reducer { protected void reduce(ImmutableBytesWritable row, Iterable kvs, Reducer.Context context) - throws java.io.IOException, InterruptedException { + throws java.io.IOException, InterruptedException { TreeSet map = new TreeSet<>(CellComparatorImpl.COMPARATOR); - for (KeyValue kv: kvs) { + for (KeyValue kv : kvs) { try { map.add(kv.clone()); } catch (CloneNotSupportedException e) { @@ -52,7 +47,7 @@ protected void reduce(ImmutableBytesWritable row, Iterable kvs, } context.setStatus("Read " + map.getClass()); int index = 0; - for (KeyValue kv: map) { + for (KeyValue kv : map) { context.write(row, kv); if (++index % 100 == 0) context.setStatus("Wrote " + index); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java index 6410bf8726c6..67b2e8cd434d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableHFileOutputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -6,42 +6,33 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *

    - * http://www.apache.org/licenses/LICENSE-2.0 - *

    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.io.IOException; +import java.nio.charset.Charset; +import java.util.List; import org.apache.hadoop.hbase.client.TableDescriptor; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Job; - -import java.io.IOException; -import java.nio.charset.Charset; -import java.util.List; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Create 3 level tree directory, first level is using table name as parent - * directory and then use family name as child directory, and all related HFiles - * for one family are under child directory - * -tableName1 - * -columnFamilyName1 - * -columnFamilyName2 - * -HFiles - * -tableName2 - * -columnFamilyName1 - * -HFiles - * -columnFamilyName2 + * Create 3 level tree directory, first level is using table name as parent directory and then use + * family name as child directory, and all related HFiles for one family are under child directory + * -tableName1 -columnFamilyName1 -columnFamilyName2 -HFiles -tableName2 -columnFamilyName1 -HFiles + * -columnFamilyName2 */ @InterfaceAudience.Public public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { @@ -50,13 +41,11 @@ public class MultiTableHFileOutputFormat extends HFileOutputFormat2 { /** * Creates a composite key to use as a mapper output key when using * MultiTableHFileOutputFormat.configureIncrementaLoad to set up bulk ingest job - * * @param tableName Name of the Table - Eg: TableName.getNameAsString() - * @param suffix Usually represents a rowkey when creating a mapper key or column family - * @return byte[] representation of composite key + * @param suffix Usually represents a rowkey when creating a mapper key or column family + * @return byte[] representation of composite key */ - public static byte[] createCompositeKey(byte[] tableName, - byte[] suffix) { + public static byte[] createCompositeKey(byte[] tableName, byte[] suffix) { return combineTableNameSuffix(tableName, suffix); } @@ -64,8 +53,7 @@ public static byte[] createCompositeKey(byte[] tableName, * Alternate api which accepts an ImmutableBytesWritable for the suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(byte[] tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(byte[] tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName, suffix.get()); } @@ -74,26 +62,23 @@ public static byte[] createCompositeKey(byte[] tableName, * suffix * @see MultiTableHFileOutputFormat#createCompositeKey(byte[], byte[]) */ - public static byte[] createCompositeKey(String tableName, - ImmutableBytesWritable suffix) { + public static byte[] createCompositeKey(String tableName, ImmutableBytesWritable suffix) { return combineTableNameSuffix(tableName.getBytes(Charset.forName("UTF-8")), suffix.get()); } /** * Analogous to - * {@link 
HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, - * this function will configure the requisite number of reducers to write HFiles for multple - * tables simultaneously - * - * @param job See {@link org.apache.hadoop.mapreduce.Job} + * {@link HFileOutputFormat2#configureIncrementalLoad(Job, TableDescriptor, RegionLocator)}, this + * function will configure the requisite number of reducers to write HFiles for multple tables + * simultaneously + * @param job See {@link org.apache.hadoop.mapreduce.Job} * @param multiTableDescriptors Table descriptor and region locator pairs * @throws IOException */ - public static void configureIncrementalLoad(Job job, List - multiTableDescriptors) + public static void configureIncrementalLoad(Job job, List multiTableDescriptors) throws IOException { MultiTableHFileOutputFormat.configureIncrementalLoad(job, multiTableDescriptors, - MultiTableHFileOutputFormat.class); + MultiTableHFileOutputFormat.class); } final private static int validateCompositeKey(byte[] keyBytes) { @@ -102,8 +87,8 @@ final private static int validateCompositeKey(byte[] keyBytes) { // Either the separator was not found or a tablename wasn't present or a key wasn't present if (separatorIdx == -1) { - throw new IllegalArgumentException("Invalid format for composite key [" + Bytes - .toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); + throw new IllegalArgumentException("Invalid format for composite key [" + + Bytes.toStringBinary(keyBytes) + "]. Cannot extract tablename and suffix from key"); } return separatorIdx; } @@ -115,6 +100,6 @@ protected static byte[] getTableName(byte[] keyBytes) { protected static byte[] getSuffix(byte[] keyBytes) { int separatorIdx = validateCompositeKey(keyBytes); - return Bytes.copy(keyBytes, separatorIdx+1, keyBytes.length - separatorIdx - 1); + return Bytes.copy(keyBytes, separatorIdx + 1, keyBytes.length - separatorIdx - 1); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java index d0cffb325316..bcdd6fb16018 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,16 +20,13 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Scan; +import org.apache.yetus.audience.InterfaceAudience; /** - * Convert HBase tabular data from multiple scanners into a format that - * is consumable by Map/Reduce. - * + * Convert HBase tabular data from multiple scanners into a format that is consumable by Map/Reduce. *
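Stepping back to MultiTableHFileOutputFormat above: a mapper that feeds a multi-table bulk load wraps each row key in a composite key via createCompositeKey, roughly as in this sketch (the input format, table name, family and column are hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultiTableHFileOutputFormat;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Sketch: key every cell by (table, row) so MultiTableHFileOutputFormat can
// route it to the right table directory during the bulk-load job.
public class MultiTableBulkLoadMapper
    extends Mapper<LongWritable, Text, ImmutableBytesWritable, KeyValue> {

  private static final byte[] TABLE = Bytes.toBytes("table1"); // hypothetical table
  private static final byte[] FAMILY = Bytes.toBytes("d");     // hypothetical family

  @Override
  protected void map(LongWritable offset, Text line, Context context)
      throws IOException, InterruptedException {
    String[] fields = line.toString().split("\t");
    byte[] row = Bytes.toBytes(fields[0]);
    // Composite key = table name + separator + row key.
    byte[] outKey = MultiTableHFileOutputFormat.createCompositeKey(TABLE, row);
    KeyValue kv = new KeyValue(row, FAMILY, Bytes.toBytes("c1"), Bytes.toBytes(fields[1]));
    context.write(new ImmutableBytesWritable(outKey), kv);
  }
}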

    * Usage example *

    @@ -49,13 +46,12 @@ * scan1.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, table2); * scans.add(scan2); * - * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, - * IntWritable.class, job); + * TableMapReduceUtil.initTableMapperJob(scans, TableMapper.class, Text.class, IntWritable.class, + * job); *
    */ @InterfaceAudience.Public -public class MultiTableInputFormat extends MultiTableInputFormatBase implements - Configurable { +public class MultiTableInputFormat extends MultiTableInputFormatBase implements Configurable { /** Job parameter that specifies the scan list. */ public static final String SCANS = "hbase.mapreduce.scans"; @@ -65,7 +61,6 @@ public class MultiTableInputFormat extends MultiTableInputFormatBase implements /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -75,20 +70,17 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to set the details for the tables to - * be scanned. - * + * Sets the configuration. This is used to set the details for the tables to be scanned. * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override public void setConf(Configuration configuration) { this.conf = configuration; String[] rawScans = conf.getStrings(SCANS); if (rawScans.length <= 0) { - throw new IllegalArgumentException("There must be at least 1 scan configuration set to : " - + SCANS); + throw new IllegalArgumentException( + "There must be at least 1 scan configuration set to : " + SCANS); } List scans = new ArrayList<>(); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java index d8205c1a0ca1..81c9a8c80eda 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +20,10 @@ import java.io.IOException; import java.text.MessageFormat; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.Map; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HRegionLocation; import org.apache.hadoop.hbase.TableName; @@ -42,18 +41,17 @@ import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import java.util.Map; -import java.util.HashMap; -import java.util.Iterator; /** - * A base for {@link MultiTableInputFormat}s. Receives a list of - * {@link Scan} instances that define the input tables and - * filters etc. Subclasses may use other TableRecordReader implementations. + * A base for {@link MultiTableInputFormat}s. Receives a list of {@link Scan} instances that define + * the input tables and filters etc. Subclasses may use other TableRecordReader implementations. 
*/ @InterfaceAudience.Public -public abstract class MultiTableInputFormatBase extends - InputFormat { +public abstract class MultiTableInputFormatBase + extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(MultiTableInputFormatBase.class); @@ -64,22 +62,18 @@ public abstract class MultiTableInputFormatBase extends private TableRecordReader tableRecordReader = null; /** - * Builds a TableRecordReader. If no TableRecordReader was provided, uses the - * default. - * + * Builds a TableRecordReader. If no TableRecordReader was provided, uses the default. * @param split The split to work with. * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @throws InterruptedException when record reader initialization fails * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException, InterruptedException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException, InterruptedException { TableSplit tSplit = (TableSplit) split; LOG.info(MessageFormat.format("Input split length: {0} bytes.", tSplit.getLength())); @@ -146,9 +140,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. - * + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. 
@@ -163,8 +156,7 @@ public List getSplits(JobContext context) throws IOException { Map> tableMaps = new HashMap<>(); for (Scan scan : scans) { byte[] tableNameBytes = scan.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME); - if (tableNameBytes == null) - throw new IOException("A scan object did not have a table name"); + if (tableNameBytes == null) throw new IOException("A scan object did not have a table name"); TableName tableName = TableName.valueOf(tableNameBytes); @@ -185,14 +177,14 @@ public List getSplits(JobContext context) throws IOException { TableName tableName = entry.getKey(); List scanList = entry.getValue(); try (Table table = conn.getTable(tableName); - RegionLocator regionLocator = conn.getRegionLocator(tableName)) { - RegionSizeCalculator sizeCalculator = new RegionSizeCalculator( - regionLocator, conn.getAdmin()); + RegionLocator regionLocator = conn.getRegionLocator(tableName)) { + RegionSizeCalculator sizeCalculator = + new RegionSizeCalculator(regionLocator, conn.getAdmin()); Pair keys = regionLocator.getStartEndKeys(); for (Scan scan : scanList) { if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { - throw new IOException("Expecting at least one region for table : " - + tableName.getNameAsString()); + throw new IOException( + "Expecting at least one region for table : " + tableName.getNameAsString()); } int count = 0; @@ -204,29 +196,26 @@ public List getSplits(JobContext context) throws IOException { continue; } - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || Bytes.compareTo(stopRow, - keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; - - HRegionLocation hregionLocation = regionLocator.getRegionLocation( - keys.getFirst()[i], false); + if ((startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; + + HRegionLocation hregionLocation = + regionLocator.getRegionLocation(keys.getFirst()[i], false); String regionHostname = hregionLocation.getHostname(); HRegionInfo regionInfo = hregionLocation.getRegionInfo(); String encodedRegionName = regionInfo.getEncodedName(); - long regionSize = sizeCalculator.getRegionSize( - regionInfo.getRegionName()); + long regionSize = sizeCalculator.getRegionSize(regionInfo.getRegionName()); - TableSplit split = new TableSplit(table.getName(), - scan, splitStart, splitStop, regionHostname, - encodedRegionName, regionSize); + TableSplit split = new TableSplit(table.getName(), scan, splitStart, splitStop, + regionHostname, encodedRegionName, regionSize); splits.add(split); @@ -244,29 +233,25 @@ public List getSplits(JobContext context) throws IOException { } /** - * Test if the given region is to be included in the InputSplit while - * splitting the regions of a table. 
+ * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *

    - * This optimization is effective when there is a specific reasoning to - * exclude an entire region from the M-R job, (and hence, not contributing to - * the InputSplit), given the start and end keys of the same.
    - * Useful when we need to remember the last-processed top record and revisit - * the [last, current) interval for M-R processing, continuously. In addition - * to reducing InputSplits, reduces the load on the region server as well, due - * to the ordering of the keys.
    + * This optimization is effective when there is a specific reasoning to exclude an entire region + * from the M-R job, (and hence, not contributing to the InputSplit), given the start and end keys + * of the same.
    + * Useful when we need to remember the last-processed top record and revisit the [last, current) + * interval for M-R processing, continuously. In addition to reducing InputSplits, reduces the + * load on the region server as well, due to the ordering of the keys.
    + *
    + * Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
    - * Note: It is possible that endKey.length() == 0 , for the last - * (recent) region.
    - * Override this method, if you want to bulk exclude regions altogether from - * M-R. By default, no region is excluded( i.e. all regions are included). - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region * @param endKey End key of the region - * @return true, if this region needs to be included as part of the input - * (default). + * @return true, if this region needs to be included as part of the input (default). */ - protected boolean includeRegionInSplit(final byte[] startKey, - final byte[] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -279,7 +264,6 @@ protected List getScans() { /** * Allows subclasses to set the list of {@link Scan} objects. - * * @param scans The list of {@link Scan} used to define the input */ protected void setScans(List scans) { @@ -288,9 +272,7 @@ protected void setScans(List scans) { /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java index 2a4fae944095..ed7bc0706e85 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,10 +20,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.TableName; @@ -32,9 +27,9 @@ import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.JobContext; @@ -42,21 +37,22 @@ import org.apache.hadoop.mapreduce.OutputFormat; import org.apache.hadoop.mapreduce.RecordWriter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** *
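A sketch of the includeRegionInSplit() override described just above (the subclass, the remembered key and the resume logic are all hypothetical):

import org.apache.hadoop.hbase.mapreduce.MultiTableInputFormat;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical subclass: only regions that may contain rows at or after
// LAST_PROCESSED contribute InputSplits; earlier regions are skipped entirely.
public class ResumingMultiTableInputFormat extends MultiTableInputFormat {

  private static final byte[] LAST_PROCESSED = Bytes.toBytes("row-12345");

  @Override
  protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) {
    // endKey.length == 0 marks the last region of the table; always keep it.
    if (endKey.length == 0) {
      return true;
    }
    // Keep the region only if its end key lies past the last processed row.
    return Bytes.compareTo(endKey, LAST_PROCESSED) > 0;
  }
}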

    - * Hadoop output format that writes to one or more HBase tables. The key is - * taken to be the table name while the output value must be either a - * {@link Put} or a {@link Delete} instance. All tables must already exist, and - * all Puts and Deletes must reference only valid column families. + * Hadoop output format that writes to one or more HBase tables. The key is taken to be the table + * name while the output value must be either a {@link Put} or a {@link Delete} instance. + * All tables must already exist, and all Puts and Deletes must reference only valid column + * families. *

    - * *

    - * Write-ahead logging (WAL) for Puts can be disabled by setting - * {@link #WAL_PROPERTY} to {@link #WAL_OFF}. Default value is {@link #WAL_ON}. - * Note that disabling write-ahead logging is only appropriate for jobs where - * loss of data due to region server failure can be tolerated (for example, - * because it is easy to rerun a bulk import). + * Write-ahead logging (WAL) for Puts can be disabled by setting {@link #WAL_PROPERTY} to + * {@link #WAL_OFF}. Default value is {@link #WAL_ON}. Note that disabling write-ahead logging is + * only appropriate for jobs where loss of data due to region server failure can be tolerated (for + * example, because it is easy to rerun a bulk import). *
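A small, hypothetical job setup matching this description (table, family and row values are placeholders; it assumes the WAL_PROPERTY and WAL_OFF constants linked above are public):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat;
import org.apache.hadoop.mapreduce.Job;

public class MultiTableOutputExample {
  public static Job createJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Turn the WAL off only when re-running the whole job is an acceptable recovery path.
    conf.setBoolean(MultiTableOutputFormat.WAL_PROPERTY, MultiTableOutputFormat.WAL_OFF);
    Job job = Job.getInstance(conf, "multi-table-writer");
    job.setOutputFormatClass(MultiTableOutputFormat.class);
    // A mapper or reducer then emits (table name, mutation) pairs, for example:
    //   context.write(new ImmutableBytesWritable(Bytes.toBytes("table1")), put);
    // where put is a Put (or Delete) against an existing column family of table1.
    return job;
  }
}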

    */ @InterfaceAudience.Public @@ -67,11 +63,12 @@ public class MultiTableOutputFormat extends OutputFormat { + protected static class MultiTableRecordWriter + extends RecordWriter { private static final Logger LOG = LoggerFactory.getLogger(MultiTableRecordWriter.class); Connection connection; Map mutatorMap = new HashMap<>(); @@ -79,36 +76,31 @@ protected static class MultiTableRecordWriter extends boolean useWriteAheadLogging; /** - * @param conf - * HBaseConfiguration to used - * @param useWriteAheadLogging - * whether to use write ahead logging. This can be turned off ( + * @param conf HBaseConfiguration to used + * @param useWriteAheadLogging whether to use write ahead logging. This can be turned off ( * false) to improve performance when bulk loading data. */ - public MultiTableRecordWriter(Configuration conf, - boolean useWriteAheadLogging) throws IOException { - LOG.debug("Created new MultiTableRecordReader with WAL " - + (useWriteAheadLogging ? "on" : "off")); + public MultiTableRecordWriter(Configuration conf, boolean useWriteAheadLogging) + throws IOException { + LOG.debug( + "Created new MultiTableRecordReader with WAL " + (useWriteAheadLogging ? "on" : "off")); this.conf = conf; this.useWriteAheadLogging = useWriteAheadLogging; } /** - * @param tableName - * the name of the table, as a string + * @param tableName the name of the table, as a string * @return the named mutator - * @throws IOException - * if there is a problem opening a table + * @throws IOException if there is a problem opening a table */ BufferedMutator getBufferedMutator(ImmutableBytesWritable tableName) throws IOException { - if(this.connection == null){ + if (this.connection == null) { this.connection = ConnectionFactory.createConnection(conf); } if (!mutatorMap.containsKey(tableName)) { - LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get())+ "\" for writing"); + LOG.debug("Opening HTable \"" + Bytes.toString(tableName.get()) + "\" for writing"); - BufferedMutator mutator = - connection.getBufferedMutator(TableName.valueOf(tableName.get())); + BufferedMutator mutator = connection.getBufferedMutator(TableName.valueOf(tableName.get())); mutatorMap.put(tableName, mutator); } return mutatorMap.get(tableName); @@ -126,13 +118,9 @@ public void close(TaskAttemptContext context) throws IOException { /** * Writes an action (Put or Delete) to the specified table. - * - * @param tableName - * the table being updated. - * @param action - * the update, either a put or a delete. - * @throws IllegalArgumentException - * if the action is not a put or a delete. + * @param tableName the table being updated. + * @param action the update, either a put or a delete. + * @throws IllegalArgumentException if the action is not a put or a delete. */ @Override public void write(ImmutableBytesWritable tableName, Mutation action) throws IOException { @@ -140,21 +128,17 @@ public void write(ImmutableBytesWritable tableName, Mutation action) throws IOEx // The actions are not immutable, so we defensively copy them if (action instanceof Put) { Put put = new Put((Put) action); - put.setDurability(useWriteAheadLogging ? Durability.SYNC_WAL - : Durability.SKIP_WAL); + put.setDurability(useWriteAheadLogging ? 
Durability.SYNC_WAL : Durability.SKIP_WAL); mutator.mutate(put); } else if (action instanceof Delete) { Delete delete = new Delete((Delete) action); mutator.mutate(delete); - } else - throw new IllegalArgumentException( - "action must be either Delete or Put"); + } else throw new IllegalArgumentException("action must be either Delete or Put"); } } @Override - public void checkOutputSpecs(JobContext context) throws IOException, - InterruptedException { + public void checkOutputSpecs(JobContext context) throws IOException, InterruptedException { // we can't know ahead of time if it's going to blow up when the user // passes a table name that doesn't exist, so nothing useful here. } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java index fa7129030402..99a8054c8dff 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormat.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,44 +15,35 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; -import org.apache.hbase.thirdparty.com.google.common.collect.Lists; +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.JobContext; +import org.apache.yetus.audience.InterfaceAudience; -import java.io.IOException; -import java.util.Collection; -import java.util.List; -import java.util.Map; +import org.apache.hbase.thirdparty.com.google.common.collect.Lists; /** - * MultiTableSnapshotInputFormat generalizes - * {@link TableSnapshotInputFormat} - * allowing a MapReduce job to run over one or more table snapshots, with one or more scans - * configured for each. - * Internally, the input format delegates to - * {@link TableSnapshotInputFormat} - * and thus has the same performance advantages; - * see {@link TableSnapshotInputFormat} for - * more details. - * Usage is similar to TableSnapshotInputFormat, with the following exception: - * initMultiTableSnapshotMapperJob takes in a map - * from snapshot name to a collection of scans. For each snapshot in the map, each corresponding - * scan will be applied; - * the overall dataset for the job is defined by the concatenation of the regions and tables - * included in each snapshot/scan - * pair. - * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob - * (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache - * .hadoop.fs.Path)} + * MultiTableSnapshotInputFormat generalizes {@link TableSnapshotInputFormat} allowing a MapReduce + * job to run over one or more table snapshots, with one or more scans configured for each. 
+ * Internally, the input format delegates to {@link TableSnapshotInputFormat} and thus has the same + * performance advantages; see {@link TableSnapshotInputFormat} for more details. Usage is similar + * to TableSnapshotInputFormat, with the following exception: initMultiTableSnapshotMapperJob takes + * in a map from snapshot name to a collection of scans. For each snapshot in the map, each + * corresponding scan will be applied; the overall dataset for the job is defined by the + * concatenation of the regions and tables included in each snapshot/scan pair. + * {@link TableMapReduceUtil#initMultiTableSnapshotMapperJob (java.util.Map, Class, Class, Class, org.apache.hadoop.mapreduce.Job, boolean, org.apache .hadoop.fs.Path)} * can be used to configure the job. - *
    {@code
    + * 
    + * 
    + * {@code
      * Job job = new Job(conf);
  * Map<String, Collection<Scan>> snapshotScans = ImmutableMap.of(
      *    "snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("a"), Bytes.toBytes("b"))),
    @@ -64,14 +55,11 @@
      *      MyMapOutputValueWritable.class, job, true, restoreDir);
      * }
      * 
    + * * Internally, this input format restores each snapshot into a subdirectory of the given tmp - * directory. Input splits and - * record readers are created as described in - * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} - * (one per region). - * See {@link TableSnapshotInputFormat} for more notes on - * permissioning; the same caveats apply here. - * + * directory. Input splits and record readers are created as described in + * {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat} (one per region). See + * {@link TableSnapshotInputFormat} for more notes on permissioning; the same caveats apply here. * @see TableSnapshotInputFormat * @see org.apache.hadoop.hbase.client.TableSnapshotScanner */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java index ef5c161d4cd9..a9e73d38cc6a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; @@ -42,8 +41,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps; /** - * Shared implementation of mapreduce code over multiple table snapshots. - * Utilized by both mapreduce + * Shared implementation of mapreduce code over multiple table snapshots. Utilized by both mapreduce * {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat} and mapred * {@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations. */ @@ -78,9 +76,7 @@ public void setInput(Configuration conf, Map> snapshotS /** * Return the list of splits extracted from the scans/snapshots pushed to conf by - * {@link - * #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)} - * + * {@link #setInput(org.apache.hadoop.conf.Configuration, java.util.Map, org.apache.hadoop.fs.Path)} * @param conf Configuration to determine splits from * @return Return the list of splits extracted from the scans/snapshots pushed to conf * @throws IOException @@ -116,7 +112,6 @@ public List getSplits(Configuration con /** * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by * {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)} - * * @param conf Configuration to extract name -> list<scan> mappings from. 
* @return the snapshot name -> list<scan> mapping pushed to configuration * @throws IOException @@ -125,8 +120,8 @@ public Map> getSnapshotsToScans(Configuration conf) thr Map> rtn = Maps.newHashMap(); - for (Map.Entry entry : ConfigurationUtil - .getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) { + for (Map.Entry entry : ConfigurationUtil.getKeyValues(conf, + SNAPSHOT_TO_SCANS_KEY)) { String snapshotName = entry.getKey(); String scan = entry.getValue(); @@ -144,7 +139,6 @@ public Map> getSnapshotsToScans(Configuration conf) thr /** * Push snapshotScans to conf (under the key {@link #SNAPSHOT_TO_SCANS_KEY}) - * * @param conf * @param snapshotScans * @throws IOException @@ -171,7 +165,6 @@ public void setSnapshotToScans(Configuration conf, Map> /** * Retrieve the directories into which snapshots have been restored from * ({@link #RESTORE_DIRS_KEY}) - * * @param conf Configuration to extract restore directories from * @return the directories into which snapshots have been restored from * @throws IOException @@ -198,10 +191,9 @@ public void setSnapshotDirs(Configuration conf, Map snapshotDirs) } /** - * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and - * return a map from the snapshot to the restore directory. - * - * @param snapshots collection of snapshot names to restore + * Generate a random path underneath baseRestoreDir for each snapshot in snapshots and return a + * map from the snapshot to the restore directory. + * @param snapshots collection of snapshot names to restore * @param baseRestoreDir base directory under which all snapshots in snapshots will be restored * @return a mapping from snapshot name to the directory in which that snapshot has been restored */ @@ -220,10 +212,9 @@ private Map generateSnapshotToRestoreDirMapping(Collection /** * Restore each (snapshot name, restore directory) pair in snapshotToDir - * - * @param conf configuration to restore with + * @param conf configuration to restore with * @param snapshotToDir mapping from snapshot names to restore directories - * @param fs filesystem to do snapshot restoration on + * @param fs filesystem to do snapshot restoration on */ public void restoreSnapshots(Configuration conf, Map snapshotToDir, FileSystem fs) throws IOException { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java index ca82e2a58ee9..16f45816552a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -42,25 +42,23 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Multithreaded implementation for @link org.apache.hbase.mapreduce.TableMapper *

    - * It can be used instead when the Map operation is not CPU - * bound in order to improve throughput. + * It can be used instead when the Map operation is not CPU bound in order to improve throughput. *

    * Mapper implementations using this MapRunnable must be thread-safe. *

    - * The Map-Reduce job has to be configured with the mapper to use via - * {@link #setMapperClass} and the number of thread the thread-pool can use with the - * {@link #getNumberOfThreads} method. The default value is 10 threads. + * The Map-Reduce job has to be configured with the mapper to use via {@link #setMapperClass} and + * the number of thread the thread-pool can use with the {@link #getNumberOfThreads} method. The + * default value is 10 threads. *
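A configuration sketch for the paragraph above (the table name, scan and wrapped mapper are hypothetical; since the class is marked audience-private, this mirrors internal usage rather than a supported public API):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultithreadedTableMapper;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;

public class MultithreadedScanJob {
  // Hypothetical single-threaded mapper that MultithreadedTableMapper will wrap.
  public static class MyTableMapper
      extends TableMapper<ImmutableBytesWritable, NullWritable> {
    @Override
    protected void map(ImmutableBytesWritable row, Result value, Context context)
        throws java.io.IOException, InterruptedException {
      context.write(row, NullWritable.get());
    }
  }

  public static Job createJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "multithreaded-scan");
    // Run MultithreadedTableMapper over the table; it fans the map work out to a thread pool.
    TableMapReduceUtil.initTableMapperJob("table1", new Scan(),
      MultithreadedTableMapper.class, ImmutableBytesWritable.class, NullWritable.class, job);
    // Tell it which real mapper to run and how many threads to use (default is 10).
    MultithreadedTableMapper.setMapperClass(job, MyTableMapper.class);
    MultithreadedTableMapper.setNumberOfThreads(job, 16);
    return job;
  }
}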

    */ @InterfaceAudience.Private public class MultithreadedTableMapper extends TableMapper { private static final Logger LOG = LoggerFactory.getLogger(MultithreadedTableMapper.class); - private Class> mapClass; + private Class> mapClass; private Context outer; private ExecutorService executor; public static final String NUMBER_OF_THREADS = "hbase.mapreduce.multithreadedmapper.threads"; @@ -72,8 +70,7 @@ public class MultithreadedTableMapper extends TableMapper { * @return the number of threads */ public static int getNumberOfThreads(JobContext job) { - return job.getConfiguration(). - getInt(NUMBER_OF_THREADS, 10); + return job.getConfiguration().getInt(NUMBER_OF_THREADS, 10); } /** @@ -82,8 +79,7 @@ public static int getNumberOfThreads(JobContext job) { * @param threads the new number of threads */ public static void setNumberOfThreads(Job job, int threads) { - job.getConfiguration().setInt(NUMBER_OF_THREADS, - threads); + job.getConfiguration().setInt(NUMBER_OF_THREADS, threads); } /** @@ -94,11 +90,10 @@ public static void setNumberOfThreads(Job job, int threads) { * @return the mapper class to run */ @SuppressWarnings("unchecked") - public static - Class> getMapperClass(JobContext job) { - return (Class>) - job.getConfiguration().getClass( MAPPER_CLASS, - Mapper.class); + public static Class> + getMapperClass(JobContext job) { + return (Class>) job.getConfiguration() + .getClass(MAPPER_CLASS, Mapper.class); } /** @@ -108,15 +103,13 @@ Class> getMapperClass(JobContext jo * @param job the job to modify * @param cls the class to use as the mapper */ - public static - void setMapperClass(Job job, - Class> cls) { + public static void setMapperClass(Job job, + Class> cls) { if (MultithreadedTableMapper.class.isAssignableFrom(cls)) { - throw new IllegalArgumentException("Can't have recursive " + - "MultithreadedTableMapper instances."); + throw new IllegalArgumentException( + "Can't have recursive " + "MultithreadedTableMapper instances."); } - job.getConfiguration().setClass(MAPPER_CLASS, - cls, Mapper.class); + job.getConfiguration().setClass(MAPPER_CLASS, cls, Mapper.class); } /** @@ -128,11 +121,10 @@ public void run(Context context) throws IOException, InterruptedException { int numberOfThreads = getNumberOfThreads(context); mapClass = getMapperClass(context); if (LOG.isDebugEnabled()) { - LOG.debug("Configuring multithread runner to use " + numberOfThreads + - " threads"); + LOG.debug("Configuring multithread runner to use " + numberOfThreads + " threads"); } executor = Executors.newFixedThreadPool(numberOfThreads); - for(int i=0; i < numberOfThreads; ++i) { + for (int i = 0; i < numberOfThreads; ++i) { MapRunner thread = new MapRunner(context); executor.execute(thread); } @@ -143,8 +135,7 @@ public void run(Context context) throws IOException, InterruptedException { } } - private class SubMapRecordReader - extends RecordReader { + private class SubMapRecordReader extends RecordReader { private ImmutableBytesWritable key; private Result value; private Configuration conf; @@ -159,9 +150,8 @@ public float getProgress() throws IOException, InterruptedException { } @Override - public void initialize(InputSplit split, - TaskAttemptContext context - ) throws IOException, InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { conf = context.getConfiguration(); } @@ -171,8 +161,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException { if (!outer.nextKeyValue()) { return false; } - key = 
ReflectionUtils.copy(outer.getConfiguration(), - outer.getCurrentKey(), key); + key = ReflectionUtils.copy(outer.getConfiguration(), outer.getCurrentKey(), key); value = ReflectionUtils.copy(conf, outer.getCurrentValue(), value); return true; } @@ -188,16 +177,14 @@ public Result getCurrentValue() { } } - private class SubMapRecordWriter extends RecordWriter { + private class SubMapRecordWriter extends RecordWriter { @Override - public void close(TaskAttemptContext context) throws IOException, - InterruptedException { + public void close(TaskAttemptContext context) throws IOException, InterruptedException { } @Override - public void write(K2 key, V2 value) throws IOException, - InterruptedException { + public void write(K2 key, V2 value) throws IOException, InterruptedException { synchronized (outer) { outer.write(key, value); } @@ -231,59 +218,37 @@ public float getProgress() { } } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Don't understand why FB is complaining about this one. We do throw exception") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Don't understand why FB is complaining about this one. We do throw exception") private class MapRunner implements Runnable { - private Mapper mapper; + private Mapper mapper; private Context subcontext; @SuppressWarnings({ "rawtypes", "unchecked" }) MapRunner(Context context) throws IOException, InterruptedException { - mapper = ReflectionUtils.newInstance(mapClass, - context.getConfiguration()); + mapper = ReflectionUtils.newInstance(mapClass, context.getConfiguration()); try { - Constructor c = context.getClass().getConstructor( - Mapper.class, - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = context.getClass().getConstructor(Mapper.class, Configuration.class, + TaskAttemptID.class, RecordReader.class, RecordWriter.class, OutputCommitter.class, + StatusReporter.class, InputSplit.class); c.setAccessible(true); - subcontext = (Context) c.newInstance( - mapper, - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); + subcontext = (Context) c.newInstance(mapper, outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); } catch (Exception e) { try { - Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl").getConstructor( - Configuration.class, - TaskAttemptID.class, - RecordReader.class, - RecordWriter.class, - OutputCommitter.class, - StatusReporter.class, - InputSplit.class); + Constructor c = Class.forName("org.apache.hadoop.mapreduce.task.MapContextImpl") + .getConstructor(Configuration.class, TaskAttemptID.class, RecordReader.class, + RecordWriter.class, OutputCommitter.class, StatusReporter.class, InputSplit.class); c.setAccessible(true); - MapContext mc = (MapContext) c.newInstance( - outer.getConfiguration(), - outer.getTaskAttemptID(), - new SubMapRecordReader(), - new SubMapRecordWriter(), - context.getOutputCommitter(), - new SubMapStatusReporter(), - outer.getInputSplit()); - Class wrappedMapperClass = Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); + MapContext mc = (MapContext) 
c.newInstance(outer.getConfiguration(), + outer.getTaskAttemptID(), new SubMapRecordReader(), new SubMapRecordWriter(), + context.getOutputCommitter(), new SubMapStatusReporter(), outer.getInputSplit()); + Class wrappedMapperClass = + Class.forName("org.apache.hadoop.mapreduce.lib.map.WrappedMapper"); Method getMapContext = wrappedMapperClass.getMethod("getMapContext", MapContext.class); - subcontext = (Context) getMapContext.invoke( - wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); + subcontext = (Context) getMapContext + .invoke(wrappedMapperClass.getDeclaredConstructor().newInstance(), mc); } catch (Exception ee) { // FindBugs: REC_CATCH_EXCEPTION // rethrow as IOE throw new IOException(e); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java index 7859afa496c4..63ed8d1fdc15 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/MutationSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,17 +20,17 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Delete; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.hbase.client.Put; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType; @InterfaceAudience.Public public class MutationSerialization implements Serialization { @@ -69,6 +69,7 @@ public void open(InputStream in) throws IOException { } } + private static class MutationSerializer implements Serializer { private OutputStream out; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java index 317b328df782..4a56b3d2fe63 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutCombiner.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,21 +19,19 @@ import java.io.IOException; import java.util.List; -import java.util.Map.Entry; import java.util.Map; - -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** - * Combine Puts. Merges Put instances grouped by K into a single - * instance. + * Combine Puts. Merges Put instances grouped by K into a single instance. * @see TableMapReduceUtil */ @InterfaceAudience.Public @@ -49,8 +46,8 @@ protected void reduce(K row, Iterable vals, Context context) // flush could result in multiple Puts for a single rowkey. That is // acceptable because Combiner is run as an optimization and it's not // critical that all Puts are grouped perfectly. - long threshold = context.getConfiguration().getLong( - "putcombiner.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putcombiner.row.threshold", 1L * (1 << 30)); int cnt = 0; long curSize = 0; Put put = null; @@ -61,8 +58,7 @@ protected void reduce(K row, Iterable vals, Context context) put = p; familyMap = put.getFamilyCellMap(); } else { - for (Entry> entry : p.getFamilyCellMap() - .entrySet()) { + for (Entry> entry : p.getFamilyCellMap().entrySet()) { List cells = familyMap.get(entry.getKey()); List kvs = (cells != null) ? (List) cells : null; for (Cell cell : entry.getValue()) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java index f4ad1f25fe4b..45b43e0c7545 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/PutSortReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +22,6 @@ import java.util.Iterator; import java.util.List; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +31,6 @@ import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; import org.apache.hadoop.hbase.TagUtil; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -41,18 +38,17 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** - * Emits sorted Puts. - * Reads in all Puts from passed Iterator, sorts them, then emits - * Puts in sorted order. If lots of columns per row, it will use lots of - * memory sorting. + * Emits sorted Puts. Reads in all Puts from passed Iterator, sorts them, then emits Puts in sorted + * order. If lots of columns per row, it will use lots of memory sorting. 
* @see HFileOutputFormat2 * @see CellSortReducer */ @InterfaceAudience.Public -public class PutSortReducer extends - Reducer { +public class PutSortReducer + extends Reducer { // the cell creator private CellCreator kvCreator; @@ -65,16 +61,12 @@ public class PutSortReducer extends } @Override - protected void reduce( - ImmutableBytesWritable row, - java.lang.Iterable puts, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable row, java.lang.Iterable puts, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "putsortreducer.row.threshold", 1L * (1<<30)); + long threshold = + context.getConfiguration().getLong("putsortreducer.row.threshold", 1L * (1 << 30)); Iterator iter = puts.iterator(); while (iter.hasNext()) { TreeSet map = new TreeSet<>(CellComparator.getInstance()); @@ -107,8 +99,8 @@ protected void reduce( // just ignoring the bad one? throw new IOException("Invalid visibility expression found in mutation " + p, e); } - for (List cells: p.getFamilyCellMap().values()) { - for (Cell cell: cells) { + for (List cells : p.getFamilyCellMap().values()) { + for (Cell cell : cells) { // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. KeyValue kv = null; @@ -128,13 +120,12 @@ protected void reduce( } } } - context.setStatus("Read " + map.size() + " entries of " + map.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + map.size() + " entries of " + map.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : map) { context.write(row, kv); - if (++index % 100 == 0) - context.setStatus("Wrote " + index); + if (++index % 100 == 0) context.setStatus("Wrote " + index); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java index 40cd34f3844a..5e5e3b5ad9b5 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,9 +38,9 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Sets; /** - * Computes size of each region for given table and given column families. - * The value is used by MapReduce for better scheduling. - * */ + * Computes size of each region for given table and given column families. The value is used by + * MapReduce for better scheduling. + */ @InterfaceAudience.Private public class RegionSizeCalculator { @@ -48,7 +48,7 @@ public class RegionSizeCalculator { /** * Maps each region to its size in bytes. - * */ + */ private final Map sizeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR); static final String ENABLE_REGIONSIZECALCULATOR = "hbase.regionsizecalculator.enable"; @@ -56,13 +56,12 @@ public class RegionSizeCalculator { /** * Computes size of each region for table and given column families. 
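To make the RegionSizeCalculator changes above easier to follow, here is a small sketch of how the class is typically consumed (it is IA.Private, so normally only from within the input formats); the table is whichever one the caller passes in.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.mapreduce.RegionSizeCalculator;

public final class RegionSizeDump {
  /** Prints the store-file size (in bytes) of every region of the given table. */
  public static void dump(Connection connection, TableName tableName) throws IOException {
    try (RegionLocator locator = connection.getRegionLocator(tableName);
        Admin admin = connection.getAdmin()) {
      // Issues one getRegionMetrics() call per region server hosting the table,
      // unless hbase.regionsizecalculator.enable=false.
      RegionSizeCalculator calculator = new RegionSizeCalculator(locator, admin);
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        byte[] regionName = location.getRegion().getRegionName();
        System.out.println(location.getRegion().getRegionNameAsString() + " => "
          + calculator.getRegionSize(regionName) + " bytes");
      }
    }
  }
}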
- * */ + */ public RegionSizeCalculator(RegionLocator regionLocator, Admin admin) throws IOException { init(regionLocator, admin); } - private void init(RegionLocator regionLocator, Admin admin) - throws IOException { + private void init(RegionLocator regionLocator, Admin admin) throws IOException { if (!enabled(admin.getConfiguration())) { LOG.info("Region size calculation disabled."); return; @@ -79,12 +78,12 @@ private void init(RegionLocator regionLocator, Admin admin) Set tableServers = getRegionServersOfTable(regionLocator); for (ServerName tableServerName : tableServers) { - for (RegionMetrics regionLoad : admin.getRegionMetrics( - tableServerName,regionLocator.getName())) { + for (RegionMetrics regionLoad : admin.getRegionMetrics(tableServerName, + regionLocator.getName())) { byte[] regionId = regionLoad.getRegionName(); - long regionSizeBytes - = ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; + long regionSizeBytes = + ((long) regionLoad.getStoreFileSize().get(Size.Unit.MEGABYTE)) * MEGABYTE; sizeMap.put(regionId, regionSizeBytes); @@ -96,8 +95,7 @@ private void init(RegionLocator regionLocator, Admin admin) LOG.debug("Region sizes calculated"); } - private Set getRegionServersOfTable(RegionLocator regionLocator) - throws IOException { + private Set getRegionServersOfTable(RegionLocator regionLocator) throws IOException { Set tableServers = Sets.newHashSet(); for (HRegionLocation regionLocation : regionLocator.getAllRegionLocations()) { @@ -112,7 +110,7 @@ boolean enabled(Configuration configuration) { /** * Returns size of given region in bytes. Returns 0 if region was not found. - * */ + */ public long getRegionSize(byte[] regionId) { Long size = sizeMap.get(regionId); if (size == null) { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java index 9fdaa7b78f75..782621e120af 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/ResultSerialization.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,21 +24,21 @@ import java.io.OutputStream; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Result; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.io.serializer.Deserializer; import org.apache.hadoop.io.serializer.Serialization; import org.apache.hadoop.io.serializer.Serializer; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; @InterfaceAudience.Public public class ResultSerialization extends Configured implements Serialization { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java index 2427e909ff23..5f6233f286ed 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RoundRobinTableInputFormat.java @@ -50,11 +50,11 @@ public class RoundRobinTableInputFormat extends TableInputFormat { private Boolean hbaseRegionsizecalculatorEnableOriginalValue = null; /** * Boolean config for whether superclass should produce InputSplits with 'lengths'. If true, TIF - * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will - * be used the the InputSplit length. If false, we skip this query and the super-classes - * returned InputSplits will have lenghths of zero. This override will set the flag to false. - * All returned lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort - * returned by this override will prevail. Thats what we want. + * will query every RegionServer to get the 'size' of all involved Regions and this 'size' will be + * used the the InputSplit length. If false, we skip this query and the super-classes returned + * InputSplits will have lenghths of zero. This override will set the flag to false. All returned + * lengths will be zero. Makes it so sorting on 'length' becomes a noop. The sort returned by this + * override will prevail. Thats what we want. */ static String HBASE_REGIONSIZECALCULATOR_ENABLE = "hbase.regionsizecalculator.enable"; @@ -116,26 +116,26 @@ List roundRobin(List inputs) throws IOException { } /** - * Adds a configuration to the Context disabling remote rpc'ing to figure Region size - * when calculating InputSplits. See up in super-class TIF where we rpc to every server to find - * the size of all involved Regions. Here we disable this super-class action. This means - * InputSplits will have a length of zero. If all InputSplits have zero-length InputSplits, the - * ordering done in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will - * ask every node for the current size of each of the participating Table Regions. 
It does this - * because it wants to schedule the biggest Regions first (This fixation comes of hadoop itself - * -- see JobSubmitter where it sorts inputs by size). This extra diligence takes time and is of - * no utility in this RRTIF where spread is of more import than size-first. Also, if a rolling - * restart is happening when we go to launch the job, the job launch may fail because the request - * for Region size fails -- even after retries -- because rolled RegionServer may take a while to - * come online: e.g. it takes java 90 seconds to allocate a 160G. RegionServer is offline during - * this time. The job launch will fail with 'Connection rejected'. So, we set - * 'hbase.regionsizecalculator.enable' to false here in RRTIF. + * Adds a configuration to the Context disabling remote rpc'ing to figure Region size when + * calculating InputSplits. See up in super-class TIF where we rpc to every server to find the + * size of all involved Regions. Here we disable this super-class action. This means InputSplits + * will have a length of zero. If all InputSplits have zero-length InputSplits, the ordering done + * in here will 'pass-through' Hadoop's length-first sort. The superclass TIF will ask every node + * for the current size of each of the participating Table Regions. It does this because it wants + * to schedule the biggest Regions first (This fixation comes of hadoop itself -- see JobSubmitter + * where it sorts inputs by size). This extra diligence takes time and is of no utility in this + * RRTIF where spread is of more import than size-first. Also, if a rolling restart is happening + * when we go to launch the job, the job launch may fail because the request for Region size fails + * -- even after retries -- because rolled RegionServer may take a while to come online: e.g. it + * takes java 90 seconds to allocate a 160G. RegionServer is offline during this time. The job + * launch will fail with 'Connection rejected'. So, we set 'hbase.regionsizecalculator.enable' to + * false here in RRTIF. * @see #unconfigure() */ void configure() { if (getConf().get(HBASE_REGIONSIZECALCULATOR_ENABLE) != null) { - this.hbaseRegionsizecalculatorEnableOriginalValue = getConf(). - getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); + this.hbaseRegionsizecalculatorEnableOriginalValue = + getConf().getBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, true); } getConf().setBoolean(HBASE_REGIONSIZECALCULATOR_ENABLE, false); } @@ -165,7 +165,7 @@ public static void main(String[] args) throws IOException { configuration.set(TableInputFormat.INPUT_TABLE, args[0]); tif.setConf(configuration); List splits = tif.getSplits(new JobContextImpl(configuration, new JobID())); - for (InputSplit split: splits) { + for (InputSplit split : splits) { System.out.println(split); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java index 50d726b12cdc..913fb6c94e1c 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/RowCounter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -19,37 +18,37 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; -import java.util.List; import java.util.ArrayList; - +import java.util.List; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.util.AbstractHBaseTool; -import org.apache.hbase.thirdparty.com.google.common.base.Splitter; -import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; -import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; -import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; -import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; -import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FilterBase; import org.apache.hadoop.hbase.filter.FirstKeyOnlyFilter; import org.apache.hadoop.hbase.filter.MultiRowRangeFilter; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hbase.thirdparty.com.google.common.base.Splitter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.BasicParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; +import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLineParser; +import org.apache.hbase.thirdparty.org.apache.commons.cli.HelpFormatter; +import org.apache.hbase.thirdparty.org.apache.commons.cli.MissingOptionException; +import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; /** - * A job with a just a map phase to count rows. Map outputs table rows IF the - * input row has columns that have content. + * A job with a just a map phase to count rows. Map outputs table rows IF the input row has columns + * that have content. */ @InterfaceAudience.Public public class RowCounter extends AbstractHBaseTool { @@ -77,25 +76,23 @@ public class RowCounter extends AbstractHBaseTool { /** * Mapper that runs the count. */ - static class RowCounterMapper - extends TableMapper { + static class RowCounterMapper extends TableMapper { /** Counter enumeration to count the actual rows. */ - public static enum Counters {ROWS} + public static enum Counters { + ROWS + } /** * Maps the data. - * - * @param row The current table row key. - * @param values The columns. - * @param context The current context. + * @param row The current table row key. + * @param values The columns. + * @param context The current context. * @throws IOException When something is broken with the data. 
* @see org.apache.hadoop.mapreduce.Mapper#map(Object, Object, Context) */ @Override - public void map(ImmutableBytesWritable row, Result values, - Context context) - throws IOException { + public void map(ImmutableBytesWritable row, Result values, Context context) throws IOException { // Count every row containing data, whether it's in qualifiers or values context.getCounter(Counters.ROWS).increment(1); } @@ -103,8 +100,7 @@ public void map(ImmutableBytesWritable row, Result values, /** * Sets up the actual job. - * - * @param conf The current configuration. + * @param conf The current configuration. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -125,30 +121,28 @@ public Job createSubmittableJob(Configuration conf) throws IOException { } } - if(this.expectedCount >= 0) { + if (this.expectedCount >= 0) { conf.setLong(EXPECTED_COUNT_KEY, this.expectedCount); } scan.setTimeRange(startTime, endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. * @deprecated as of release 2.3.0. Will be removed on 4.0.0. Please use main method instead. */ @Deprecated - public static Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException { String tableName = args[0]; List rowRangeList = null; long startTime = 0; @@ -166,7 +160,7 @@ public static Job createSubmittableJob(Configuration conf, String[] args) if (args[i].startsWith(rangeSwitch)) { try { rowRangeList = parseRowRangeParameter( - args[i].substring(args[1].indexOf(rangeSwitch)+rangeSwitch.length())); + args[i].substring(args[1].indexOf(rangeSwitch) + rangeSwitch.length())); } catch (IllegalArgumentException e) { return null; } @@ -206,58 +200,55 @@ public static Job createSubmittableJob(Configuration conf, String[] args) if (StringUtils.isBlank(qualifier)) { scan.addFamily(Bytes.toBytes(family)); - } - else { + } else { scan.addColumn(Bytes.toBytes(family), Bytes.toBytes(qualifier)); } } } scan.setTimeRange(startTime, endTime == 0 ? HConstants.LATEST_TIMESTAMP : endTime); job.setOutputFormatClass(NullOutputFormat.class); - TableMapReduceUtil.initTableMapperJob(tableName, scan, - RowCounterMapper.class, ImmutableBytesWritable.class, Result.class, job); + TableMapReduceUtil.initTableMapperJob(tableName, scan, RowCounterMapper.class, + ImmutableBytesWritable.class, Result.class, job); job.setNumReduceTasks(0); return job; } /** - * Prints usage without error message. - * Note that we don't document --expected-count, because it's intended for test. + * Prints usage without error message. Note that we don't document --expected-count, because it's + * intended for test. 
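For orientation, RowCounter is normally driven through ToolRunner with the table name, optional time-range switches, and optional column arguments; a sketch with an invented table name, family, and arbitrary epoch-millisecond timestamps.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.RowCounter;
import org.apache.hadoop.util.ToolRunner;

public final class RowCounterDriver {
  public static void main(String[] args) throws Exception {
    // Roughly equivalent to: hbase rowcounter exampleTable --starttime=... --endtime=... d
    String[] rowCounterArgs = new String[] {
      "exampleTable",
      "--starttime=1609459200000",
      "--endtime=1640995200000",
      "d"
    };
    int exitCode =
      ToolRunner.run(HBaseConfiguration.create(), new RowCounter(), rowCounterArgs);
    System.exit(exitCode);
  }
}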
*/ private static void printUsage(String errorMessage) { System.err.println("ERROR: " + errorMessage); - System.err.println("Usage: hbase rowcounter [options] " - + "[--starttime= --endtime=] " - + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); + System.err.println( + "Usage: hbase rowcounter [options] " + "[--starttime= --endtime=] " + + "[--range=[startKey],[endKey][;[startKey],[endKey]...]] [ ...]"); System.err.println("For performance consider the following options:\n" - + "-Dhbase.client.scanner.caching=100\n" - + "-Dmapreduce.map.speculative=false"); + + "-Dhbase.client.scanner.caching=100\n" + "-Dmapreduce.map.speculative=false"); } private static List parseRowRangeParameter(String arg) { final List rangesSplit = Splitter.on(";").splitToList(arg); final List rangeList = new ArrayList<>(); for (String range : rangesSplit) { - if(range!=null && !range.isEmpty()) { + if (range != null && !range.isEmpty()) { List startEnd = Splitter.on(",").splitToList(range); if (startEnd.size() != 2 || startEnd.get(1).contains(",")) { throw new IllegalArgumentException("Wrong range specification: " + range); } String startKey = startEnd.get(0); String endKey = startEnd.get(1); - rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), - true, Bytes.toBytesBinary(endKey), false)); + rangeList.add(new MultiRowRangeFilter.RowRange(Bytes.toBytesBinary(startKey), true, + Bytes.toBytesBinary(endKey), false)); } } return rangeList; } /** - * Sets filter {@link FilterBase} to the {@link Scan} instance. - * If provided rowRangeList contains more than one element, - * method sets filter which is instance of {@link MultiRowRangeFilter}. - * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. - * If rowRangeList contains exactly one element, startRow and stopRow are set to the scan. + * Sets filter {@link FilterBase} to the {@link Scan} instance. If provided rowRangeList contains + * more than one element, method sets filter which is instance of {@link MultiRowRangeFilter}. + * Otherwise, method sets filter which is instance of {@link FirstKeyOnlyFilter}. If rowRangeList + * contains exactly one element, startRow and stopRow are set to the scan. * @param scan * @param rowRangeList */ @@ -268,8 +259,8 @@ private static void setScanFilter(Scan scan, List } if (size == 1) { MultiRowRangeFilter.RowRange range = rowRangeList.get(0); - scan.setStartRow(range.getStartRow()); //inclusive - scan.setStopRow(range.getStopRow()); //exclusive + scan.setStartRow(range.getStartRow()); // inclusive + scan.setStopRow(range.getStopRow()); // exclusive } else if (size > 1) { scan.setFilter(new MultiRowRangeFilter(rowRangeList)); } @@ -281,8 +272,8 @@ protected void printUsage() { footerBuilder.append("For performance, consider the following configuration properties:\n"); footerBuilder.append("-Dhbase.client.scanner.caching=100\n"); footerBuilder.append("-Dmapreduce.map.speculative=false\n"); - printUsage("hbase rowcounter [options] [ ...]", - "Options:", footerBuilder.toString()); + printUsage("hbase rowcounter [options] [ ...]", "Options:", + footerBuilder.toString()); } @Override @@ -297,15 +288,15 @@ protected void printUsage(final String usageStr, final String usageHeader, @Override protected void addOptions() { - Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); - Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true). 
- desc("end time filter limit, to only count rows up to this timestamp."). - longOpt(OPT_END_TIME).build(); - Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); - Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true). - desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); + Option startTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("starting time filter to start counting rows from.").longOpt(OPT_START_TIME).build(); + Option endTimeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("end time filter limit, to only count rows up to this timestamp.") + .longOpt(OPT_END_TIME).build(); + Option rangeOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("[startKey],[endKey][;[startKey],[endKey]...]]").longOpt(OPT_RANGE).build(); + Option expectedOption = Option.builder(null).valueSeparator('=').hasArg(true) + .desc("expected number of rows to be count.").longOpt(OPT_EXPECTED_COUNT).build(); addOption(startTimeOption); addOption(endTimeOption); addOption(rangeOption); @@ -313,28 +304,28 @@ protected void addOptions() { } @Override - protected void processOptions(CommandLine cmd) throws IllegalArgumentException{ + protected void processOptions(CommandLine cmd) throws IllegalArgumentException { this.tableName = cmd.getArgList().get(0); - if(cmd.getOptionValue(OPT_RANGE)!=null) { + if (cmd.getOptionValue(OPT_RANGE) != null) { this.rowRangeList = parseRowRangeParameter(cmd.getOptionValue(OPT_RANGE)); } - this.endTime = cmd.getOptionValue(OPT_END_TIME) == null ? HConstants.LATEST_TIMESTAMP : - Long.parseLong(cmd.getOptionValue(OPT_END_TIME)); - this.expectedCount = cmd.getOptionValue(OPT_EXPECTED_COUNT) == null ? Long.MIN_VALUE : - Long.parseLong(cmd.getOptionValue(OPT_EXPECTED_COUNT)); - this.startTime = cmd.getOptionValue(OPT_START_TIME) == null ? 0 : - Long.parseLong(cmd.getOptionValue(OPT_START_TIME)); - - for(int i=1; ihbase.simpletotalorder.start - * and hbase.simpletotalorder.end. The end key needs to be - * exclusive; i.e. one larger than the biggest key in your key space. - * You may be surprised at how this class partitions the space; it may not - * align with preconceptions; e.g. a start key of zero and an end key of 100 - * divided in ten will not make regions whose range is 0-10, 10-20, and so on. - * Make your own partitioner if you need the region spacing to come out a + * A partitioner that takes start and end keys and uses bigdecimal to figure which reduce a key + * belongs to. Pass the start and end keys in the Configuration using + * hbase.simpletotalorder.start and hbase.simpletotalorder.end. The end + * key needs to be exclusive; i.e. one larger than the biggest key in your key space. You may be + * surprised at how this class partitions the space; it may not align with preconceptions; e.g. a + * start key of zero and an end key of 100 divided in ten will not make regions whose range is 0-10, + * 10-20, and so on. Make your own partitioner if you need the region spacing to come out a * particular way. 
* @param * @see #START @@ -46,7 +42,7 @@ */ @InterfaceAudience.Public public class SimpleTotalOrderPartitioner extends Partitioner -implements Configurable { + implements Configurable { private final static Logger LOG = LoggerFactory.getLogger(SimpleTotalOrderPartitioner.class); /** @@ -67,9 +63,9 @@ public class SimpleTotalOrderPartitioner extends PartitioneremptyIterator()); + private static final CellScanner EMPTY_CELL_SCANNER = + new CellScanner(Collections. emptyIterator()); /** - * Rescan the given range directly from the source and target tables. - * Count and log differences, and if this is not a dry run, output Puts and Deletes - * to make the target table match the source table for this range + * Rescan the given range directly from the source and target tables. Count and log differences, + * and if this is not a dry run, output Puts and Deletes to make the target table match the + * source table for this range */ private void syncRange(Context context, ImmutableBytesWritable startRow, ImmutableBytesWritable stopRow) throws IOException, InterruptedException { @@ -360,7 +360,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, boolean rangeMatched = true; byte[] nextSourceRow = sourceCells.nextRow(); byte[] nextTargetRow = targetCells.nextRow(); - while(nextSourceRow != null || nextTargetRow != null) { + while (nextSourceRow != null || nextTargetRow != null) { boolean rowMatched; int rowComparison = compareRowKeys(nextSourceRow, nextTargetRow); if (rowComparison < 0) { @@ -370,7 +370,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, context.getCounter(Counter.TARGETMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextSourceRow, sourceCells, EMPTY_CELL_SCANNER); - nextSourceRow = sourceCells.nextRow(); // advance only source to next row + nextSourceRow = sourceCells.nextRow(); // advance only source to next row } else if (rowComparison > 0) { if (LOG.isDebugEnabled()) { LOG.debug("Source missing row: " + Bytes.toString(nextTargetRow)); @@ -378,7 +378,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, context.getCounter(Counter.SOURCEMISSINGROWS).increment(1); rowMatched = syncRowCells(context, nextTargetRow, EMPTY_CELL_SCANNER, targetCells); - nextTargetRow = targetCells.nextRow(); // advance only target to next row + nextTargetRow = targetCells.nextRow(); // advance only target to next row } else { // current row is the same on both sides, compare cell by cell rowMatched = syncRowCells(context, nextSourceRow, sourceCells, targetCells); @@ -395,7 +395,7 @@ private void syncRange(Context context, ImmutableBytesWritable startRow, targetScanner.close(); context.getCounter(rangeMatched ? Counter.RANGESMATCHED : Counter.RANGESNOTMATCHED) - .increment(1); + .increment(1); } private static class CellScanner { @@ -412,8 +412,7 @@ public CellScanner(Iterator results) { } /** - * Advance to the next row and return its row key. - * Returns null iff there are no more rows. + * Advance to the next row and return its row key. Returns null iff there are no more rows. 
*/ public byte[] nextRow() { if (nextRowResult == null) { @@ -421,9 +420,8 @@ public byte[] nextRow() { while (results.hasNext()) { nextRowResult = results.next(); Cell nextCell = nextRowResult.rawCells()[0]; - if (currentRow == null - || !Bytes.equals(currentRow, 0, currentRow.length, nextCell.getRowArray(), - nextCell.getRowOffset(), nextCell.getRowLength())) { + if (currentRow == null || !Bytes.equals(currentRow, 0, currentRow.length, + nextCell.getRowArray(), nextCell.getRowOffset(), nextCell.getRowLength())) { // found next row break; } else { @@ -464,7 +462,7 @@ public Cell nextCellInRow() { Result result = results.next(); Cell cell = result.rawCells()[0]; if (Bytes.equals(currentRow, 0, currentRow.length, cell.getRowArray(), - cell.getRowOffset(), cell.getRowLength())) { + cell.getRowOffset(), cell.getRowLength())) { // result is part of current row currentRowResult = result; nextCellInRow = 0; @@ -483,28 +481,26 @@ public Cell nextCellInRow() { } } - private Cell checkAndResetTimestamp(Cell sourceCell){ + private Cell checkAndResetTimestamp(Cell sourceCell) { if (ignoreTimestamp) { sourceCell = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setType(sourceCell.getType()) - .setRow(sourceCell.getRowArray(), - sourceCell.getRowOffset(), sourceCell.getRowLength()) - .setFamily(sourceCell.getFamilyArray(), - sourceCell.getFamilyOffset(), sourceCell.getFamilyLength()) - .setQualifier(sourceCell.getQualifierArray(), - sourceCell.getQualifierOffset(), sourceCell.getQualifierLength()) - .setTimestamp(EnvironmentEdgeManager.currentTime()) - .setValue(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength()).build(); + .setType(sourceCell.getType()) + .setRow(sourceCell.getRowArray(), sourceCell.getRowOffset(), sourceCell.getRowLength()) + .setFamily(sourceCell.getFamilyArray(), sourceCell.getFamilyOffset(), + sourceCell.getFamilyLength()) + .setQualifier(sourceCell.getQualifierArray(), sourceCell.getQualifierOffset(), + sourceCell.getQualifierLength()) + .setTimestamp(EnvironmentEdgeManager.currentTime()).setValue(sourceCell.getValueArray(), + sourceCell.getValueOffset(), sourceCell.getValueLength()) + .build(); } return sourceCell; } /** - * Compare the cells for the given row from the source and target tables. - * Count and log any differences. - * If not a dry run, output a Put and/or Delete needed to sync the target table - * to match the source table. + * Compare the cells for the given row from the source and target tables. Count and log any + * differences. If not a dry run, output a Put and/or Delete needed to sync the target table to + * match the source table. 
*/ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceCells, CellScanner targetCells) throws IOException, InterruptedException { @@ -545,8 +541,8 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC delete = new Delete(rowKey); } // add a tombstone to exactly match the target cell that is missing on the source - delete.addColumn(CellUtil.cloneFamily(targetCell), - CellUtil.cloneQualifier(targetCell), targetCell.getTimestamp()); + delete.addColumn(CellUtil.cloneFamily(targetCell), CellUtil.cloneQualifier(targetCell), + targetCell.getTimestamp()); } targetCell = targetCells.nextCellInRow(); @@ -557,12 +553,12 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC } else { if (LOG.isDebugEnabled()) { LOG.debug("Different values: "); - LOG.debug(" source cell: " + sourceCell - + " value: " + Bytes.toString(sourceCell.getValueArray(), - sourceCell.getValueOffset(), sourceCell.getValueLength())); - LOG.debug(" target cell: " + targetCell - + " value: " + Bytes.toString(targetCell.getValueArray(), - targetCell.getValueOffset(), targetCell.getValueLength())); + LOG.debug(" source cell: " + sourceCell + " value: " + + Bytes.toString(sourceCell.getValueArray(), sourceCell.getValueOffset(), + sourceCell.getValueLength())); + LOG.debug(" target cell: " + targetCell + " value: " + + Bytes.toString(targetCell.getValueArray(), targetCell.getValueOffset(), + targetCell.getValueLength())); } context.getCounter(Counter.DIFFERENTCELLVALUES).increment(1); matchingRow = false; @@ -614,12 +610,11 @@ private boolean syncRowCells(Context context, byte[] rowKey, CellScanner sourceC } /** - * Compare row keys of the given Result objects. - * Nulls are after non-nulls + * Compare row keys of the given Result objects. Nulls are after non-nulls */ private static int compareRowKeys(byte[] r1, byte[] r2) { if (r1 == null) { - return 1; // source missing row + return 1; // source missing row } else if (r2 == null) { return -1; // target missing row } else { @@ -630,11 +625,10 @@ private static int compareRowKeys(byte[] r1, byte[] r2) { } /** - * Compare families, qualifiers, and timestamps of the given Cells. - * They are assumed to be of the same row. - * Nulls are after non-nulls. + * Compare families, qualifiers, and timestamps of the given Cells. They are assumed to be of + * the same row. Nulls are after non-nulls. 
*/ - private int compareCellKeysWithinRow(Cell c1, Cell c2) { + private int compareCellKeysWithinRow(Cell c1, Cell c2) { if (c1 == null) { return 1; // source missing cell } @@ -661,8 +655,7 @@ private int compareCellKeysWithinRow(Cell c1, Cell c2) { } @Override - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { if (mapperException == null) { try { finishRemainingHashRanges(context); @@ -692,8 +685,8 @@ protected void cleanup(Context context) } } - private void finishRemainingHashRanges(Context context) throws IOException, - InterruptedException { + private void finishRemainingHashRanges(Context context) + throws IOException, InterruptedException { TableSplit split = (TableSplit) context.getInputSplit(); byte[] splitEndRow = split.getEndRow(); boolean reachedEndOfTable = HashTable.isTableEndRow(splitEndRow); @@ -708,7 +701,7 @@ private void finishRemainingHashRanges(Context context) throws IOException, // need to complete the final open hash batch if ((nextSourceKey != null && nextSourceKey.compareTo(splitEndRow) > 0) - || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow))) { + || (nextSourceKey == null && !Bytes.equals(splitEndRow, sourceTableHash.stopRow))) { // the open hash range continues past the end of this region // add a scan to complete the current hash range Scan scan = sourceTableHash.initScan(); @@ -738,6 +731,7 @@ private void finishRemainingHashRanges(Context context) throws IOException, } private static final int NUM_ARGS = 3; + private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { System.err.println("ERROR: " + errorMsg); @@ -771,8 +765,7 @@ private static void printUsage(final String errorMsg) { System.err.println("Examples:"); System.err.println(" For a dry run SyncTable of tableA from a remote source cluster"); System.err.println(" to a local target cluster:"); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" + System.err.println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.SyncTable --dryrun=true" + " --sourcezkcluster=zk1.example.com,zk2.example.com,zk3.example.com:2181:/hbase" + " hdfs://nn:9000/hashes/tableA tableA tableA"); } @@ -834,7 +827,6 @@ private boolean doCommandLine(final String[] args) { return false; } - } catch (Exception e) { e.printStackTrace(); printUsage("Can't start because " + e.getMessage()); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java index 3eb7d699bd0c..985daba0a85a 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,31 +21,29 @@ import java.util.Collections; import java.util.List; import java.util.Locale; - import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.mapreduce.InputSplit; -import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Convert HBase tabular data into a format that is consumable by Map/Reduce. */ @InterfaceAudience.Public -public class TableInputFormat extends TableInputFormatBase -implements Configurable { +public class TableInputFormat extends TableInputFormatBase implements Configurable { @SuppressWarnings("hiding") private static final Logger LOG = LoggerFactory.getLogger(TableInputFormat.class); @@ -54,12 +51,13 @@ public class TableInputFormat extends TableInputFormatBase /** Job parameter that specifies the input table. */ public static final String INPUT_TABLE = "hbase.mapreduce.inputtable"; /** - * If specified, use start keys of this table to split. - * This is useful when you are preparing data for bulkload. + * If specified, use start keys of this table to split. This is useful when you are preparing data + * for bulkload. */ private static final String SPLIT_TABLE = "hbase.mapreduce.splittable"; - /** Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. - * See {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. + /** + * Base-64 encoded scanner. All other SCAN_ confs are ignored if this is specified. See + * {@link TableMapReduceUtil#convertScanToString(Scan)} for more details. */ public static final String SCAN = "hbase.mapreduce.scan"; /** Scan start row */ @@ -92,7 +90,6 @@ public class TableInputFormat extends TableInputFormatBase /** * Returns the current configuration. - * * @return The current configuration. * @see org.apache.hadoop.conf.Configurable#getConf() */ @@ -102,16 +99,13 @@ public Configuration getConf() { } /** - * Sets the configuration. This is used to set the details for the table to - * be scanned. - * - * @param configuration The configuration to set. - * @see org.apache.hadoop.conf.Configurable#setConf( - * org.apache.hadoop.conf.Configuration) + * Sets the configuration. This is used to set the details for the table to be scanned. + * @param configuration The configuration to set. 
+ * @see org.apache.hadoop.conf.Configurable#setConf( org.apache.hadoop.conf.Configuration) */ @Override - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="REC_CATCH_EXCEPTION", - justification="Intentional") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "REC_CATCH_EXCEPTION", + justification = "Intentional") public void setConf(Configuration configuration) { this.conf = configuration; @@ -127,7 +121,7 @@ public void setConf(Configuration configuration) { try { scan = createScanFromConfiguration(conf); } catch (Exception e) { - LOG.error(StringUtils.stringifyException(e)); + LOG.error(StringUtils.stringifyException(e)); } } @@ -135,13 +129,13 @@ public void setConf(Configuration configuration) { } /** - * Sets up a {@link Scan} instance, applying settings from the configuration property - * constants defined in {@code TableInputFormat}. This allows specifying things such as: + * Sets up a {@link Scan} instance, applying settings from the configuration property constants + * defined in {@code TableInputFormat}. This allows specifying things such as: *

 * <ul>
- *   <li>start and stop rows</li>
- *   <li>column qualifiers or families</li>
- *   <li>timestamps or timerange</li>
- *   <li>scanner caching and batch size</li>
+ * <li>start and stop rows</li>
+ * <li>column qualifiers or families</li>
+ * <li>timestamps or timerange</li>
+ * <li>scanner caching and batch size</li>
 * </ul>
    */ public static Scan createScanFromConfiguration(Configuration conf) throws IOException { @@ -168,9 +162,8 @@ public static Scan createScanFromConfiguration(Configuration conf) throws IOExce } if (conf.get(SCAN_TIMERANGE_START) != null && conf.get(SCAN_TIMERANGE_END) != null) { - scan.setTimeRange( - Long.parseLong(conf.get(SCAN_TIMERANGE_START)), - Long.parseLong(conf.get(SCAN_TIMERANGE_END))); + scan.setTimeRange(Long.parseLong(conf.get(SCAN_TIMERANGE_START)), + Long.parseLong(conf.get(SCAN_TIMERANGE_END))); } if (conf.get(SCAN_MAXVERSIONS) != null) { @@ -204,16 +197,14 @@ protected void initialize(JobContext context) throws IOException { } /** - * Parses a combined family and qualifier and adds either both or just the - * family in case there is no qualifier. This assumes the older colon - * divided notation, e.g. "family:qualifier". - * + * Parses a combined family and qualifier and adds either both or just the family in case there is + * no qualifier. This assumes the older colon divided notation, e.g. "family:qualifier". * @param scan The Scan to update. * @param familyAndQualifier family and qualifier * @throws IllegalArgumentException When familyAndQualifier is invalid. */ private static void addColumn(Scan scan, byte[] familyAndQualifier) { - byte [][] fq = CellUtil.parseColumn(familyAndQualifier); + byte[][] fq = CellUtil.parseColumn(familyAndQualifier); if (fq.length == 1) { scan.addFamily(fq[0]); } else if (fq.length == 2) { @@ -228,31 +219,29 @@ private static void addColumn(Scan scan, byte[] familyAndQualifier) { *
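The configuration keys read by createScanFromConfiguration above map onto a setup along these lines; a sketch, with the table, row keys, and family name invented for illustration.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormat;

public final class ScanFromConf {
  public static Scan build() throws IOException {
    Configuration conf = HBaseConfiguration.create();
    conf.set(TableInputFormat.INPUT_TABLE, "exampleTable");
    conf.set(TableInputFormat.SCAN_ROW_START, "row-00000");
    conf.set(TableInputFormat.SCAN_ROW_STOP, "row-99999");
    conf.set(TableInputFormat.SCAN_COLUMN_FAMILY, "d");
    conf.setInt(TableInputFormat.SCAN_CACHEDROWS, 100);
    conf.setLong(TableInputFormat.SCAN_TIMERANGE_START, 1609459200000L);
    conf.setLong(TableInputFormat.SCAN_TIMERANGE_END, 1640995200000L);
    // Builds the Scan that the input format will use for every split.
    return TableInputFormat.createScanFromConfiguration(conf);
  }
}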

    * Overrides previous calls to {@link Scan#addColumn(byte[], byte[])}for any families in the * input. - * * @param scan The Scan to update. * @param columns array of columns, formatted as family:qualifier * @see Scan#addColumn(byte[], byte[]) */ - public static void addColumns(Scan scan, byte [][] columns) { + public static void addColumns(Scan scan, byte[][] columns) { for (byte[] column : columns) { addColumn(scan, column); } } /** - * Calculates the splits that will serve as input for the map tasks. The - * number of splits matches the number of regions in a table. Splits are shuffled if - * required. - * @param context The current job context. + * Calculates the splits that will serve as input for the map tasks. The number of splits matches + * the number of regions in a table. Splits are shuffled if required. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { List splits = super.getSplits(context); - if ((conf.get(SHUFFLE_MAPS) != null) && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { + if ((conf.get(SHUFFLE_MAPS) != null) + && "true".equals(conf.get(SHUFFLE_MAPS).toLowerCase(Locale.ROOT))) { Collections.shuffle(splits); } return splits; @@ -260,9 +249,8 @@ public List getSplits(JobContext context) throws IOException { /** * Convenience method to parse a string representation of an array of column specifiers. - * * @param scan The Scan to update. - * @param columns The columns to parse. + * @param columns The columns to parse. */ private static void addColumns(Scan scan, String columns) { String[] cols = columns.split(" "); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java index 92d1f1136398..c4c944f2bec5 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableInputFormatBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,19 +52,18 @@ import org.slf4j.LoggerFactory; /** - * A base for {@link TableInputFormat}s. Receives a {@link Connection}, a {@link TableName}, - * an {@link Scan} instance that defines the input columns etc. Subclasses may use - * other TableRecordReader implementations. - * - * Subclasses MUST ensure initializeTable(Connection, TableName) is called for an instance to - * function properly. Each of the entry points to this class used by the MapReduce framework, - * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, - * will call {@link #initialize(JobContext)} as a convenient centralized location to handle - * retrieving the necessary configuration information. If your subclass overrides either of these - * methods, either call the parent version or call initialize yourself. - * + * A base for {@link TableInputFormat}s. 
Receives a {@link Connection}, a {@link TableName}, an + * {@link Scan} instance that defines the input columns etc. Subclasses may use other + * TableRecordReader implementations. Subclasses MUST ensure initializeTable(Connection, TableName) + * is called for an instance to function properly. Each of the entry points to this class used by + * the MapReduce framework, {@link #createRecordReader(InputSplit, TaskAttemptContext)} and + * {@link #getSplits(JobContext)}, will call {@link #initialize(JobContext)} as a convenient + * centralized location to handle retrieving the necessary configuration information. If your + * subclass overrides either of these methods, either call the parent version or call initialize + * yourself. *

    * An example of a subclass: + * *

      *   class ExampleTIF extends TableInputFormatBase {
      *
    @@ -92,42 +90,43 @@
      *   }
      * 
* - * - * The number of InputSplits(mappers) match the number of regions in a table by default. - * Set "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers per region, set - * this property will disable autobalance below.\ - * Set "hbase.mapreduce.tif.input.autobalance" to enable autobalance, hbase will assign mappers - * based on average region size; For regions, whose size larger than average region size may assigned - * more mappers, and for smaller one, they may group together to use one mapper. If actual average - * region size is too big, like 50G, it is not good to only assign 1 mapper for those large regions. - * Use "hbase.mapreduce.tif.ave.regionsize" to set max average region size when enable "autobalanece", - * default mas average region size is 8G. + * The number of InputSplits (mappers) matches the number of regions in a table by default. Set + * "hbase.mapreduce.tableinput.mappers.per.region" to specify how many mappers to run per region; + * setting this property disables the autobalance described below. Set + * "hbase.mapreduce.tif.input.autobalance" to enable autobalance, in which case hbase assigns + * mappers based on the average region size: regions larger than the average may be assigned more + * mappers, while smaller ones may be grouped together to share one mapper. If the actual average + * region size is very large, say 50G, it is not good to assign only one mapper to such large + * regions. Use "hbase.mapreduce.tif.ave.regionsize" to cap the average region size used when + * autobalance is enabled; the default maximum average region size is 8G. */ @InterfaceAudience.Public -public abstract class TableInputFormatBase - extends InputFormat { +public abstract class TableInputFormatBase extends InputFormat { private static final Logger LOG = LoggerFactory.getLogger(TableInputFormatBase.class); - private static final String NOT_INITIALIZED = "The input format instance has not been properly " + - "initialized. Ensure you call initializeTable either in your constructor or initialize " + - "method"; - private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + - " previous error. Please look at the previous logs lines from" + - " the task's full log for more details."; + private static final String NOT_INITIALIZED = "The input format instance has not been properly " + + "initialized. Ensure you call initializeTable either in your constructor or initialize " + + "method"; + private static final String INITIALIZATION_ERROR = "Cannot create a record reader because of a" + + " previous error. Please look at the previous logs lines from" + + " the task's full log for more details."; /** Specify if we enable auto-balance to set number of mappers in M/R jobs. */ public static final String MAPREDUCE_INPUT_AUTOBALANCE = "hbase.mapreduce.tif.input.autobalance"; - /** In auto-balance, we split input by ave region size, if calculated region size is too big, we can set it. */ + /** + * In auto-balance, we split input by ave region size, if calculated region size is too big, we + * can set it. + */ public static final String MAX_AVERAGE_REGION_SIZE = "hbase.mapreduce.tif.ave.regionsize"; /** Set the number of Mappers for each region, all regions have same number of Mappers */ - public static final String NUM_MAPPERS_PER_REGION = "hbase.mapreduce.tableinput.mappers.per.region"; + public static final String NUM_MAPPERS_PER_REGION = + "hbase.mapreduce.tableinput.mappers.per.region"; - - /** Holds the details for the internal scanner.
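As a minimal sketch of the auto-balance and mappers-per-region knobs documented above: the constants below are the ones defined in this class, while the 10 GB cap and the choice of two mappers per region are arbitrary illustration values, not recommendations.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;

public class AutoBalanceConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Either pin a fixed number of mappers per region (this disables auto-balance) ...
    conf.setInt(TableInputFormatBase.NUM_MAPPERS_PER_REGION, 2);

    // ... or let HBase balance mappers by average region size instead.
    conf.unset(TableInputFormatBase.NUM_MAPPERS_PER_REGION);
    conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true);
    // Cap the average region size used for balancing (the default cap is 8 GB).
    conf.setLong(TableInputFormatBase.MAX_AVERAGE_REGION_SIZE, 10L * 1024 * 1024 * 1024);
  }
}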
- * - * @see Scan */ + /** + * Holds the details for the internal scanner. + * @see Scan + */ private Scan scan = null; /** The {@link Admin}. */ private Admin admin; @@ -142,27 +141,22 @@ public abstract class TableInputFormatBase /** Used to generate splits based on region size. */ private RegionSizeCalculator regionSizeCalculator; - /** The reverse DNS lookup cache mapping: IPAddress => HostName */ - private HashMap reverseDNSCacheMap = - new HashMap<>(); + private HashMap reverseDNSCacheMap = new HashMap<>(); /** - * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses - * the default. - * - * @param split The split to work with. - * @param context The current context. + * Builds a {@link TableRecordReader}. If no {@link TableRecordReader} was provided, uses the + * default. + * @param split The split to work with. + * @param context The current context. * @return The newly created record reader. * @throws IOException When creating the reader fails. * @see org.apache.hadoop.mapreduce.InputFormat#createRecordReader( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) - throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { // Just in case a subclass is relying on JobConfigurable magic. if (table == null) { initialize(context); @@ -209,8 +203,8 @@ public float getProgress() throws IOException, InterruptedException { } @Override - public void initialize(InputSplit inputsplit, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { trr.initialize(inputsplit, context); } @@ -221,17 +215,16 @@ public boolean nextKeyValue() throws IOException, InterruptedException { }; } - protected Pair getStartEndKeys() throws IOException { + protected Pair getStartEndKeys() throws IOException { return getRegionLocator().getStartEndKeys(); } /** * Calculates the splits that will serve as input for the map tasks. - * @param context The current job context. + * @param context The current job context. * @return The list of input splits. * @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - * org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ @Override public List getSplits(JobContext context) throws IOException { @@ -267,10 +260,10 @@ public List getSplits(JobContext context) throws IOException { return res; } - //The default value of "hbase.mapreduce.input.autobalance" is false. + // The default value of "hbase.mapreduce.input.autobalance" is false. 
if (context.getConfiguration().getBoolean(MAPREDUCE_INPUT_AUTOBALANCE, false)) { - long maxAveRegionSize = context.getConfiguration() - .getLong(MAX_AVERAGE_REGION_SIZE, 8L*1073741824); //8GB + long maxAveRegionSize = + context.getConfiguration().getLong(MAX_AVERAGE_REGION_SIZE, 8L * 1073741824); // 8GB return calculateAutoBalancedSplits(splits, maxAveRegionSize); } @@ -285,7 +278,6 @@ public List getSplits(JobContext context) throws IOException { /** * Create one InputSplit per region - * * @return The list of InputSplit for all the regions * @throws IOException throws IOException */ @@ -299,8 +291,7 @@ private List oneInputSplitPerRegion() throws IOException { TableName tableName = getTable().getName(); Pair keys = getStartEndKeys(); - if (keys == null || keys.getFirst() == null || - keys.getFirst().length == 0) { + if (keys == null || keys.getFirst() == null || keys.getFirst().length == 0) { HRegionLocation regLoc = getRegionLocator().getRegionLocation(HConstants.EMPTY_BYTE_ARRAY, false); if (null == regLoc) { @@ -311,9 +302,9 @@ private List oneInputSplitPerRegion() throws IOException { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, regLoc - .getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); + TableSplit split = + new TableSplit(tableName, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + regLoc.getHostnamePort().split(Addressing.HOSTNAME_PORT_SEPARATOR)[0], regionSize); splits.add(split); return splits; } @@ -326,17 +317,16 @@ private List oneInputSplitPerRegion() throws IOException { byte[] startRow = scan.getStartRow(); byte[] stopRow = scan.getStopRow(); // determine if the given start an stop key fall into the region - if ((startRow.length == 0 || keys.getSecond()[i].length == 0 || - Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) && - (stopRow.length == 0 || - Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { - byte[] splitStart = startRow.length == 0 || - Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 ? - keys.getFirst()[i] : startRow; - byte[] splitStop = (stopRow.length == 0 || - Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) && - keys.getSecond()[i].length > 0 ? - keys.getSecond()[i] : stopRow; + if ((startRow.length == 0 || keys.getSecond()[i].length == 0 + || Bytes.compareTo(startRow, keys.getSecond()[i]) < 0) + && (stopRow.length == 0 || Bytes.compareTo(stopRow, keys.getFirst()[i]) > 0)) { + byte[] splitStart = + startRow.length == 0 || Bytes.compareTo(keys.getFirst()[i], startRow) >= 0 + ? keys.getFirst()[i] + : startRow; + byte[] splitStop = + (stopRow.length == 0 || Bytes.compareTo(keys.getSecond()[i], stopRow) <= 0) + && keys.getSecond()[i].length > 0 ? keys.getSecond()[i] : stopRow; HRegionLocation location = getRegionLocator().getRegionLocation(keys.getFirst()[i], false); // The below InetSocketAddress creation does a name resolution. @@ -354,8 +344,8 @@ private List oneInputSplitPerRegion() throws IOException { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. 
See HBASE-25212 - TableSplit split = new TableSplit(tableName, null, - splitStart, splitStop, regionLocation, encodedRegionName, regionSize); + TableSplit split = new TableSplit(tableName, null, splitStart, splitStop, regionLocation, + encodedRegionName, regionSize); splits.add(split); if (LOG.isDebugEnabled()) { LOG.debug("getSplits: split -> " + i + " -> " + split); @@ -368,8 +358,8 @@ private List oneInputSplitPerRegion() throws IOException { /** * Create n splits for one InputSplit, For now only support uniform distribution * @param split A TableSplit corresponding to a range of rowkeys - * @param n Number of ranges after splitting. Pass 1 means no split for the range - * Pass 2 if you want to split the range in two; + * @param n Number of ranges after splitting. Pass 1 means no split for the range Pass 2 if you + * want to split the range in two; * @return A list of TableSplit, the size of the list is n * @throws IllegalArgumentIOException throws IllegalArgumentIOException */ @@ -380,7 +370,7 @@ protected List createNInputSplitsUniform(InputSplit split, int n) "InputSplit for CreateNSplitsPerRegion can not be null + " + "and should be instance of TableSplit"); } - //if n < 1, then still continue using n = 1 + // if n < 1, then still continue using n = 1 n = n < 1 ? 1 : n; List res = new ArrayList<>(n); if (n == 1) { @@ -398,51 +388,48 @@ protected List createNInputSplitsUniform(InputSplit split, int n) byte[] endRow = ts.getEndRow(); // For special case: startRow or endRow is empty - if (startRow.length == 0 && endRow.length == 0){ + if (startRow.length == 0 && endRow.length == 0) { startRow = new byte[1]; endRow = new byte[1]; startRow[0] = 0; endRow[0] = -1; } - if (startRow.length == 0 && endRow.length != 0){ + if (startRow.length == 0 && endRow.length != 0) { startRow = new byte[1]; startRow[0] = 0; } - if (startRow.length != 0 && endRow.length == 0){ - endRow =new byte[startRow.length]; - for (int k = 0; k < startRow.length; k++){ + if (startRow.length != 0 && endRow.length == 0) { + endRow = new byte[startRow.length]; + for (int k = 0; k < startRow.length; k++) { endRow[k] = -1; } } // Split Region into n chunks evenly - byte[][] splitKeys = Bytes.split(startRow, endRow, true, n-1); + byte[][] splitKeys = Bytes.split(startRow, endRow, true, n - 1); for (int i = 0; i < splitKeys.length - 1; i++) { // In the table input format for single table we do not need to // store the scan object in table split because it can be memory intensive and redundant // information to what is already stored in conf SCAN. See HBASE-25212 - //notice that the regionSize parameter may be not very accurate - TableSplit tsplit = - new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], regionLocation, - encodedRegionName, regionSize / n); + // notice that the regionSize parameter may be not very accurate + TableSplit tsplit = new TableSplit(tableName, null, splitKeys[i], splitKeys[i + 1], + regionLocation, encodedRegionName, regionSize / n); res.add(tsplit); } return res; } + /** - * Calculates the number of MapReduce input splits for the map tasks. The number of - * MapReduce input splits depends on the average region size. - * Make it 'public' for testing - * + * Calculates the number of MapReduce input splits for the map tasks. The number of MapReduce + * input splits depends on the average region size. Make it 'public' for testing * @param splits The list of input splits before balance. * @param maxAverageRegionSize max Average region size for one mapper * @return The list of input splits. 
* @throws IOException When creating the list of splits fails. - * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( - *org.apache.hadoop.mapreduce.JobContext) + * @see org.apache.hadoop.mapreduce.InputFormat#getSplits( org.apache.hadoop.mapreduce.JobContext) */ - public List calculateAutoBalancedSplits(List splits, long maxAverageRegionSize) - throws IOException { + public List calculateAutoBalancedSplits(List splits, + long maxAverageRegionSize) throws IOException { if (splits.size() == 0) { return splits; } @@ -455,15 +442,16 @@ public List calculateAutoBalancedSplits(List splits, lon long averageRegionSize = totalRegionSize / splits.size(); // totalRegionSize might be overflow, and the averageRegionSize must be positive. if (averageRegionSize <= 0) { - LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + - "set it to Long.MAX_VALUE " + splits.size()); + LOG.warn("The averageRegionSize is not positive: " + averageRegionSize + ", " + + "set it to Long.MAX_VALUE " + splits.size()); averageRegionSize = Long.MAX_VALUE / splits.size(); } - //if averageRegionSize is too big, change it to default as 1 GB, + // if averageRegionSize is too big, change it to default as 1 GB, if (averageRegionSize > maxAverageRegionSize) { averageRegionSize = maxAverageRegionSize; } - // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' region + // if averageRegionSize is too small, we do not need to allocate more mappers for those 'large' + // region // set default as 16M = (default hdfs block size) / 4; if (averageRegionSize < 16 * 1048576) { return splits; @@ -477,7 +465,8 @@ public List calculateAutoBalancedSplits(List splits, lon if (regionSize >= averageRegionSize) { // make this region as multiple MapReduce input split. - int n = (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); + int n = + (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0); List temp = createNInputSplitsUniform(ts, n); resultList.addAll(temp); } else { @@ -533,26 +522,25 @@ String reverseDNS(InetAddress ipAddress) throws UnknownHostException { } /** - * Test if the given region is to be included in the InputSplit while splitting - * the regions of a table. + * Test if the given region is to be included in the InputSplit while splitting the regions of a + * table. *
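The split count computed in calculateAutoBalancedSplits above follows a natural-log heuristic. A tiny sketch of that arithmetic, using a hypothetical 50 GB region against the default 8 GB cap:

public class SplitCountExample {
  public static void main(String[] args) {
    long regionSize = 50L * 1024 * 1024 * 1024;        // hypothetical 50 GB region
    long averageRegionSize = 8L * 1024 * 1024 * 1024;  // default cap from MAX_AVERAGE_REGION_SIZE

    // Same formula as in calculateAutoBalancedSplits: ln(region / average) + 1, rounded.
    int n = (int) Math.round(Math.log(((double) regionSize) / ((double) averageRegionSize)) + 1.0);

    System.out.println("Sub-splits for this region: " + n); // prints 3
  }
}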

    - * This optimization is effective when there is a specific reasoning to exclude an entire region from the M-R job, - * (and hence, not contributing to the InputSplit), given the start and end keys of the same.
    - * Useful when we need to remember the last-processed top record and revisit the [last, current) interval for M-R processing, - * continuously. In addition to reducing InputSplits, reduces the load on the region server as well, due to the ordering of the keys. - *
+ * This optimization is effective when there is a specific reason to exclude an entire region + * from the M-R job (and hence from the InputSplits), given the start and end keys of that region.
+ * Useful when we need to remember the last-processed top record and continuously revisit the + * [last, current) interval for M-R processing. In addition to reducing the number of InputSplits, + * this also reduces load on the region server, due to the ordering of the keys.
    *
    * Note: It is possible that endKey.length() == 0 , for the last (recent) region. *
    - * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no region is excluded( i.e. all regions are included). - * - * + * Override this method, if you want to bulk exclude regions altogether from M-R. By default, no + * region is excluded( i.e. all regions are included). * @param startKey Start key of the region * @param endKey End key of the region * @return true, if this region needs to be included as part of the input (default). - * */ - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { return true; } @@ -588,15 +576,14 @@ protected Admin getAdmin() { /** * Allows subclasses to initialize the table information. - * - * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. - * @param tableName The {@link TableName} of the table to process. + * @param connection The Connection to the HBase cluster. MUST be unmanaged. We will close. + * @param tableName The {@link TableName} of the table to process. * @throws IOException */ protected void initializeTable(Connection connection, TableName tableName) throws IOException { if (this.table != null || this.connection != null) { - LOG.warn("initializeTable called multiple times. Overwriting connection and table " + - "reference; TableInputFormatBase will not close these old references when done."); + LOG.warn("initializeTable called multiple times. Overwriting connection and table " + + "reference; TableInputFormatBase will not close these old references when done."); } this.table = connection.getTable(tableName); this.regionLocator = connection.getRegionLocator(tableName); @@ -613,7 +600,6 @@ protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, /** * Gets the scan defining the actual details like columns etc. - * * @return The internal scan instance. */ public Scan getScan() { @@ -623,8 +609,7 @@ public Scan getScan() { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -632,28 +617,22 @@ public void setScan(Scan scan) { /** * Allows subclasses to set the {@link TableRecordReader}. - * - * @param tableRecordReader A different {@link TableRecordReader} - * implementation. + * @param tableRecordReader A different {@link TableRecordReader} implementation. */ protected void setTableRecordReader(TableRecordReader tableRecordReader) { this.tableRecordReader = tableRecordReader; } /** - * Handle subclass specific set up. - * Each of the entry points used by the MapReduce framework, + * Handle subclass specific set up. Each of the entry points used by the MapReduce framework, * {@link #createRecordReader(InputSplit, TaskAttemptContext)} and {@link #getSplits(JobContext)}, * will call {@link #initialize(JobContext)} as a convenient centralized location to handle * retrieving the necessary configuration information and calling - * {@link #initializeTable(Connection, TableName)}. - * - * Subclasses should implement their initialize call such that it is safe to call multiple times. - * The current TableInputFormatBase implementation relies on a non-null table reference to decide - * if an initialize call is needed, but this behavior may change in the future. In particular, - * it is critical that initializeTable not be called multiple times since this will leak - * Connection instances. 
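Tying together the includeRegionInSplit hook and the initialize/initializeTable contract described above, a hypothetical subclass could look roughly like this; the table name, the resume key, and the connection handling are illustrative assumptions only.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.JobContext;

public class SkippingTableInputFormat extends TableInputFormatBase {

  private static final byte[] RESUME_KEY = Bytes.toBytes("row-050"); // hypothetical resume point

  @Override
  protected void initialize(JobContext context) throws IOException {
    // Safe to call multiple times: only set up the table reference once.
    if (getTable() == null) {
      Configuration conf = context.getConfiguration();
      Connection connection = ConnectionFactory.createConnection(conf);
      initializeTable(connection, TableName.valueOf("example_table")); // placeholder table
      setScan(new Scan());
    }
  }

  @Override
  protected boolean includeRegionInSplit(byte[] startKey, byte[] endKey) {
    // Skip regions ending at or before the resume point; endKey.length == 0 means the last region.
    return endKey.length == 0 || Bytes.compareTo(endKey, RESUME_KEY) > 0;
  }
}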
- * + * {@link #initializeTable(Connection, TableName)}. Subclasses should implement their initialize + * call such that it is safe to call multiple times. The current TableInputFormatBase + * implementation relies on a non-null table reference to decide if an initialize call is needed, + * but this behavior may change in the future. In particular, it is critical that initializeTable + * not be called multiple times since this will leak Connection instances. */ protected void initialize(JobContext context) throws IOException { } @@ -661,7 +640,6 @@ protected void initialize(JobContext context) throws IOException { /** * Close the Table and related objects that were initialized via * {@link #initializeTable(Connection, TableName)}. - * * @throws IOException */ protected void closeTable() throws IOException { @@ -675,7 +653,9 @@ protected void closeTable() throws IOException { private void close(Closeable... closables) throws IOException { for (Closeable c : closables) { - if(c != null) { c.close(); } + if (c != null) { + c.close(); + } } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java index eaf1b407ccc5..af7b389656fc 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,6 +17,7 @@ */ package org.apache.hadoop.hbase.mapreduce; +import com.codahale.metrics.MetricRegistry; import java.io.File; import java.io.IOException; import java.net.URL; @@ -33,24 +33,18 @@ import java.util.Set; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseConfiguration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.TableName; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.client.RegionLocator; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; -import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; import org.apache.hadoop.hbase.security.token.TokenUtil; @@ -61,8 +55,12 @@ import org.apache.hadoop.mapreduce.InputFormat; import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; -import com.codahale.metrics.MetricRegistry; +import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos; /** * Utility for {@link TableMapper} and {@link TableReducer} @@ -74,128 +72,98 @@ public class TableMapReduceUtil { public static final String TABLE_INPUT_CLASS_KEY = 
"hbase.table.input.class"; /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, - job, true); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, true); } - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(TableName table, - Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, + public static void initTableMapperJob(TableName table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, Job job) throws IOException { - initTableMapperJob(table.getNameAsString(), - scan, - mapper, - outputKeyClass, - outputValueClass, - job, - true); + initTableMapperJob(table.getNameAsString(), scan, mapper, outputKeyClass, outputValueClass, job, + true); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, - job, true); + public static void initTableMapperJob(byte[] table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + true); } - /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * @throws IOException When setting up the details fails. - */ - public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true, inputFormatClass); - } - + /** + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). + * @throws IOException When setting up the details fails. + */ + public static void initTableMapperJob(String table, Scan scan, + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, true, inputFormatClass); + } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * - * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * Use this before submitting a TableMap job. It will appropriately set up the job. + * @param table The table name to read from. + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @param initCredentials whether to initialize hbase auth credentials for the job * @param inputFormatClass the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, boolean initCredentials, - Class inputFormatClass) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, boolean initCredentials, + Class inputFormatClass) throws IOException { job.setInputFormatClass(inputFormatClass); if (outputValueClass != null) job.setMapOutputValueClass(outputValueClass); if (outputKeyClass != null) job.setMapOutputKeyClass(outputKeyClass); @@ -208,8 +176,8 @@ public static void initTableMapperJob(String table, Scan scan, conf.set(TableInputFormat.INPUT_TABLE, table); conf.set(TableInputFormat.SCAN, convertScanToString(scan)); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); if (addDependencyJars) { addDependencyJars(job); } @@ -219,116 +187,99 @@ public static void initTableMapperJob(String table, Scan scan, } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). 
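As a usage sketch for the initTableMapperJob overloads being re-wrapped here, a minimal read-only scanning job might be wired up as follows; the table name, job name, counter, and caching value are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RowCountingJob {

  /** Trivial mapper that only bumps a counter for every row it sees. */
  public static class RowCounterMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context) {
      context.getCounter("example", "rows").increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "example-row-count"); // placeholder job name
    job.setJarByClass(RowCountingJob.class);

    Scan scan = new Scan();
    scan.setCaching(500);        // rows fetched per scanner RPC; trades heap for throughput
    scan.setCacheBlocks(false);  // avoid polluting the block cache from a full-table MR scan

    // Wires up the input format, serializations, credentials and dependency jars.
    TableMapReduceUtil.initTableMapperJob("example_table", scan, RowCounterMapper.class,
      ImmutableBytesWritable.class, Result.class, job);

    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}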
* @param inputFormatClass The class of the input format * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Class inputFormatClass) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, inputFormatClass); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Class inputFormatClass) + throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, inputFormatClass); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table Binary representation of the table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(byte[] table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(Bytes.toString(table), scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** * @return {@link TableInputFormat} .class unless Configuration has something else at - * {@link #TABLE_INPUT_CLASS_KEY}. + * {@link #TABLE_INPUT_CLASS_KEY}. */ private static Class getConfiguredInputFormat(Job job) { - return (Class)job.getConfiguration(). - getClass(TABLE_INPUT_CLASS_KEY, TableInputFormat.class); + return (Class) job.getConfiguration().getClass(TABLE_INPUT_CLASS_KEY, + TableInputFormat.class); } /** - * Use this before submitting a TableMap job. It will appropriately set up - * the job. - * + * Use this before submitting a TableMap job. It will appropriately set up the job. * @param table The table name to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. 
Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. */ public static void initTableMapperJob(String table, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) - throws IOException { - initTableMapperJob(table, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, getConfiguredInputFormat(job)); + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars) throws IOException { + initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, getConfiguredInputFormat(job)); } /** - * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on - * direct memory will likely cause the map tasks to OOM when opening the region. This - * is done here instead of in TableSnapshotRegionRecordReader in case an advanced user - * wants to override this behavior in their job. + * Enable a basic on-heap cache for these jobs. Any BlockCache implementation based on direct + * memory will likely cause the map tasks to OOM when opening the region. This is done here + * instead of in TableSnapshotRegionRecordReader in case an advanced user wants to override this + * behavior in their job. */ public static void resetCacheConfig(Configuration conf) { - conf.setFloat( - HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); + conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT); conf.setFloat(HConstants.BUCKET_CACHE_SIZE_KEY, 0f); conf.unset(HConstants.BUCKET_CACHE_IOENGINE_KEY); } /** - * Sets up the job for reading from one or more table snapshots, with one or more scans - * per snapshot. - * It bypasses hbase servers and read directly from snapshot files. - * - * @param snapshotScans map of snapshot name to scans on that snapshot. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * Sets up the job for reading from one or more table snapshots, with one or more scans per + * snapshot. It bypasses hbase servers and read directly from snapshot files. + * @param snapshotScans map of snapshot name to scans on that snapshot. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. 
+ * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). */ public static void initMultiTableSnapshotMapperJob(Map> snapshotScans, Class mapper, Class outputKeyClass, Class outputValueClass, @@ -373,11 +324,8 @@ public static void initMultiTableSnapshotMapperJob(Map> * @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir); initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); @@ -385,105 +333,85 @@ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, } /** - * Sets up the job for reading from a table snapshot. It bypasses hbase servers - * and read directly from snapshot files. - * + * Sets up the job for reading from a table snapshot. It bypasses hbase servers and read directly + * from snapshot files. * @param snapshotName The name of the snapshot (of a table) to read from. - * @param scan The scan instance with the columns, time range etc. - * @param mapper The mapper class to use. - * @param outputKeyClass The class of the output key. - * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). - * + * @param scan The scan instance with the columns, time range etc. + * @param mapper The mapper class to use. + * @param outputKeyClass The class of the output key. + * @param outputValueClass The class of the output value. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @param tmpRestoreDir a temporary directory to copy the snapshot files into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restore directory can be deleted. + * have write permissions to this directory, and this should not be a subdirectory of + * rootdir. After the job is finished, restore directory can be deleted. * @param splitAlgo algorithm to split * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException When setting up the details fails. 
* @see TableSnapshotInputFormat */ public static void initTableSnapshotMapperJob(String snapshotName, Scan scan, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, Path tmpRestoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, - int numSplitsPerRegion) - throws IOException { + Class mapper, Class outputKeyClass, Class outputValueClass, + Job job, boolean addDependencyJars, Path tmpRestoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { TableSnapshotInputFormat.setInput(job, snapshotName, tmpRestoreDir, splitAlgo, - numSplitsPerRegion); - initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, - outputValueClass, job, addDependencyJars, false, TableSnapshotInputFormat.class); + numSplitsPerRegion); + initTableMapperJob(snapshotName, scan, mapper, outputKeyClass, outputValueClass, job, + addDependencyJars, false, TableSnapshotInputFormat.class); resetCacheConfig(job.getConfiguration()); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. * @param scans The list of {@link Scan} objects to read from. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job) throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. * @param scans The list of {@link Scan} objects to read from. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When setting up the details fails. 
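For the snapshot-reading overloads reformatted above, a sketch of the simpler variant follows; the snapshot name and restore directory are placeholders, and the restore directory must be writable by the submitting user and must not sit under the HBase root directory.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.mapreduce.Job;

public class SnapshotScanJobSetup {

  public static class PassThroughMapper extends TableMapper<ImmutableBytesWritable, Result> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
      throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  public static void configure(Job job) throws IOException {
    // Reads HFiles directly from the restored snapshot, bypassing the region servers.
    TableMapReduceUtil.initTableSnapshotMapperJob(
      "example_snapshot",                     // placeholder snapshot name
      new Scan(),
      PassThroughMapper.class,
      ImmutableBytesWritable.class,
      Result.class,
      job,
      true,                                   // addDependencyJars
      new Path("/tmp/snapshot-restore"));     // placeholder restore dir, not under hbase rootdir
  }
}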
*/ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars) throws IOException { - initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, - addDependencyJars, true); + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars) + throws IOException { + initTableMapperJob(scans, mapper, outputKeyClass, outputValueClass, job, addDependencyJars, + true); } /** - * Use this before submitting a Multi TableMap job. It will appropriately set - * up the job. - * + * Use this before submitting a Multi TableMap job. It will appropriately set up the job. * @param scans The list of {@link Scan} objects to read from. * @param mapper The mapper class to use. * @param outputKeyClass The class of the output key. * @param outputValueClass The class of the output value. - * @param job The current job to adjust. Make sure the passed job is carrying - * all necessary HBase configuration. - * @param addDependencyJars upload HBase jars and jars for any of the - * configured job classes via the distributed cache (tmpjars). + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @param initCredentials whether to initialize hbase auth credentials for the job * @throws IOException When setting up the details fails. */ - public static void initTableMapperJob(List scans, - Class mapper, - Class outputKeyClass, - Class outputValueClass, Job job, - boolean addDependencyJars, + public static void initTableMapperJob(List scans, Class mapper, + Class outputKeyClass, Class outputValueClass, Job job, boolean addDependencyJars, boolean initCredentials) throws IOException { job.setInputFormatClass(MultiTableInputFormat.class); if (outputValueClass != null) { @@ -518,7 +446,7 @@ public static void initCredentials(Job job) throws IOException { // propagate delegation related props from launcher job to MR job if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) { job.getConfiguration().set("mapreduce.job.credentials.binary", - System.getenv("HADOOP_TOKEN_FILE_LOCATION")); + System.getenv("HADOOP_TOKEN_FILE_LOCATION")); } } @@ -529,7 +457,7 @@ public static void initCredentials(Job job) throws IOException { User user = userProvider.getCurrent(); if (quorumAddress != null) { Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); + quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX); Connection peerConn = ConnectionFactory.createConnection(peerConf); try { TokenUtil.addTokenForJob(peerConn, user, job); @@ -552,39 +480,33 @@ public static void initCredentials(Job job) throws IOException { } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * - * The quorumAddress is the key to the ZK ensemble, which contains: - * hbase.zookeeper.quorum, hbase.zookeeper.client.port and + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. 
The quorumAddress is the key to the ZK + * ensemble, which contains: hbase.zookeeper.quorum, hbase.zookeeper.client.port and * zookeeper.znode.parent - * * @param job The job that requires the permission. * @param quorumAddress string that contains the 3 required configuratins * @throws IOException When the authentication token cannot be obtained. * @deprecated Since 1.2.0 and will be removed in 3.0.0. Use - * {@link #initCredentialsForCluster(Job, Configuration)} instead. + * {@link #initCredentialsForCluster(Job, Configuration)} instead. * @see #initCredentialsForCluster(Job, Configuration) * @see HBASE-14886 */ @Deprecated - public static void initCredentialsForCluster(Job job, String quorumAddress) - throws IOException { - Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(), - quorumAddress); + public static void initCredentialsForCluster(Job job, String quorumAddress) throws IOException { + Configuration peerConf = + HBaseConfiguration.createClusterConf(job.getConfiguration(), quorumAddress); initCredentialsForCluster(job, peerConf); } /** - * Obtain an authentication token, for the specified cluster, on behalf of the current user - * and add it to the credentials for the given map reduce job. - * + * Obtain an authentication token, for the specified cluster, on behalf of the current user and + * add it to the credentials for the given map reduce job. * @param job The job that requires the permission. * @param conf The configuration to use in connecting to the peer cluster * @throws IOException When the authentication token cannot be obtained. */ - public static void initCredentialsForCluster(Job job, Configuration conf) - throws IOException { + public static void initCredentialsForCluster(Job job, Configuration conf) throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); if (userProvider.isHBaseSecurityEnabled()) { try { @@ -603,8 +525,7 @@ public static void initCredentialsForCluster(Job job, Configuration conf) /** * Writes the given scan into a Base64 encoded string. - * - * @param scan The scan to write out. + * @param scan The scan to write out. * @return The scan saved in a Base64 encoded string. * @throws IOException When writing the scan fails. */ @@ -615,110 +536,92 @@ public static String convertScanToString(Scan scan) throws IOException { /** * Converts the given Base64 string back into a Scan instance. - * - * @param base64 The scan details. + * @param base64 The scan details. * @return The newly created Scan instance. * @throws IOException When reading the scan instance fails. */ public static Scan convertStringToScan(String base64) throws IOException { - byte [] decoded = Base64.getDecoder().decode(base64); + byte[] decoded = Base64.getDecoder().decode(base64); return ProtobufUtil.toScan(ClientProtos.Scan.parseFrom(decoded)); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. * @throws IOException When determining the region count fails. 
*/ - public static void initTableReducerJob(String table, - Class reducer, Job job) - throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job) throws IOException { initTableReducerJob(table, reducer, job, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. + * @param partitioner Partitioner to use. Pass null to use default partitioner. * @throws IOException When determining the region count fails. */ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner) throws IOException { initTableReducerJob(table, reducer, job, partitioner, null, null, null); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the zookeeper + * ensemble of an alternate remote cluster when you would have the reduce write a cluster + * that is other than the default; e.g. copying tables between clusters, the source would + * be designated by hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> * such as server,server2,server3:2181:/hbase. * @param serverClass redefined hbase.regionserver.class * @param serverImpl redefined hbase.regionserver.impl * @throws IOException When determining the region count fails. 
*/ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl) throws IOException { - initTableReducerJob(table, reducer, job, partitioner, quorumAddress, - serverClass, serverImpl, true); + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl) + throws IOException { + initTableReducerJob(table, reducer, job, partitioner, quorumAddress, serverClass, serverImpl, + true); } /** - * Use this before submitting a TableReduce job. It will - * appropriately set up the JobConf. - * - * @param table The output table. - * @param reducer The reducer class to use. - * @param job The current job to adjust. Make sure the passed job is - * carrying all necessary HBase configuration. - * @param partitioner Partitioner to use. Pass null to use - * default partitioner. - * @param quorumAddress Distant cluster to write to; default is null for - * output to the cluster that is designated in hbase-site.xml. - * Set this String to the zookeeper ensemble of an alternate remote cluster - * when you would have the reduce write a cluster that is other than the - * default; e.g. copying tables between clusters, the source would be - * designated by hbase-site.xml and this param would have the - * ensemble address of the remote cluster. The format to pass is particular. - * Pass <hbase.zookeeper.quorum>:< + * Use this before submitting a TableReduce job. It will appropriately set up the JobConf. + * @param table The output table. + * @param reducer The reducer class to use. + * @param job The current job to adjust. Make sure the passed job is carrying all necessary HBase + * configuration. + * @param partitioner Partitioner to use. Pass null to use default partitioner. + * @param quorumAddress Distant cluster to write to; default is null for output to the cluster + * that is designated in hbase-site.xml. Set this String to the zookeeper + * ensemble of an alternate remote cluster when you would have the reduce write a cluster + * that is other than the default; e.g. copying tables between clusters, the source would + * be designated by hbase-site.xml and this param would have the ensemble + * address of the remote cluster. The format to pass is particular. Pass + * <hbase.zookeeper.quorum>:< * hbase.zookeeper.client.port>:<zookeeper.znode.parent> * such as server,server2,server3:2181:/hbase. * @param serverClass redefined hbase.regionserver.class * @param serverImpl redefined hbase.regionserver.impl - * @param addDependencyJars upload HBase jars and jars for any of the configured - * job classes via the distributed cache (tmpjars). + * @param addDependencyJars upload HBase jars and jars for any of the configured job classes via + * the distributed cache (tmpjars). * @throws IOException When determining the region count fails. 
*/ - public static void initTableReducerJob(String table, - Class reducer, Job job, - Class partitioner, String quorumAddress, String serverClass, - String serverImpl, boolean addDependencyJars) throws IOException { + public static void initTableReducerJob(String table, Class reducer, + Job job, Class partitioner, String quorumAddress, String serverClass, String serverImpl, + boolean addDependencyJars) throws IOException { Configuration conf = job.getConfiguration(); HBaseConfiguration.merge(conf, HBaseConfiguration.create(conf)); @@ -726,12 +629,12 @@ public static void initTableReducerJob(String table, if (reducer != null) job.setReducerClass(reducer); conf.set(TableOutputFormat.OUTPUT_TABLE, table); conf.setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName()); // If passed a quorum/ensemble address, pass it on to TableOutputFormat. if (quorumAddress != null) { // Calling this will validate the format ZKConfig.validateClusterKey(quorumAddress); - conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress); + conf.set(TableOutputFormat.QUORUM_ADDRESS, quorumAddress); } if (serverClass != null && serverImpl != null) { conf.set(TableOutputFormat.REGION_SERVER_CLASS, serverClass); @@ -757,11 +660,10 @@ public static void initTableReducerJob(String table, } /** - * Ensures that the given number of reduce tasks for the given job - * configuration does not exceed the number of regions for the given table. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Ensures that the given number of reduce tasks for the given job configuration does not exceed + * the number of regions for the given table. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void limitNumReduceTasks(String table, Job job) throws IOException { @@ -772,11 +674,10 @@ public static void limitNumReduceTasks(String table, Job job) throws IOException } /** - * Sets the number of reduce tasks for the given job configuration to the - * number of regions the given table has. - * - * @param table The table to get the region count for. - * @param job The current job to adjust. + * Sets the number of reduce tasks for the given job configuration to the number of regions the + * given table has. + * @param table The table to get the region count for. + * @param job The current job to adjust. * @throws IOException When retrieving the table details fails. */ public static void setNumReduceTasks(String table, Job job) throws IOException { @@ -784,13 +685,11 @@ public static void setNumReduceTasks(String table, Job job) throws IOException { } /** - * Sets the number of rows to return and cache with each scanner iteration. - * Higher caching values will enable faster mapreduce jobs at the expense of - * requiring more heap to contain the cached rows. - * + * Sets the number of rows to return and cache with each scanner iteration. Higher caching values + * will enable faster mapreduce jobs at the expense of requiring more heap to contain the cached + * rows. * @param job The current job to adjust. - * @param batchSize The number of rows to return in batch with each scanner - * iteration. + * @param batchSize The number of rows to return in batch with each scanner iteration. 
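For illustration, a sketch of a copy-style job wired up with the helpers documented above: initTableMapperJob, initTableReducerJob with a remote cluster key in the <quorum>:<client port>:<znode parent> format, setNumReduceTasks and setScannerCaching. The table names, ZooKeeper hosts and the RowCopyMapper class are hypothetical; IdentityTableReducer ships with hbase-mapreduce:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.mapreduce.Job;

    public class CopyToRemoteCluster {

      /** Re-emits every row of the source table as a Put keyed by its row key. */
      public static class RowCopyMapper extends TableMapper<ImmutableBytesWritable, Put> {
        @Override
        protected void map(ImmutableBytesWritable row, Result value, Context context)
            throws IOException, InterruptedException {
          Put put = new Put(row.copyBytes());
          for (Cell cell : value.rawCells()) {
            put.add(cell);
          }
          context.write(row, put);
        }
      }

      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "copy-to-remote-cluster");
        job.setJarByClass(CopyToRemoteCluster.class);

        // Read source_table from the cluster named in hbase-site.xml.
        TableMapReduceUtil.initTableMapperJob("source_table", new Scan(), RowCopyMapper.class,
          ImmutableBytesWritable.class, Put.class, job);

        // Write dest_table on a different cluster; the cluster key follows the
        // format described above.
        TableMapReduceUtil.initTableReducerJob("dest_table", IdentityTableReducer.class, job, null,
          "remote-zk1,remote-zk2,remote-zk3:2181:/hbase", null, null);

        // One reduce task per region of the output table, and a larger scanner
        // cache for the map-side scan.
        TableMapReduceUtil.setNumReduceTasks("dest_table", job);
        TableMapReduceUtil.setScannerCaching(job, 500);

        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }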
*/ public static void setScannerCaching(Job job, int batchSize) { job.getConfiguration().setInt("hbase.client.scanner.caching", batchSize); @@ -799,10 +698,9 @@ public static void setScannerCaching(Job job, int batchSize) { /** * Add HBase and its dependencies (only) to the job configuration. *

    - * This is intended as a low-level API, facilitating code reuse between this - * class and its mapred counterpart. It also of use to external tools that - * need to build a MapReduce job that interacts with HBase but want - * fine-grained control over the jars shipped to the cluster. + * This is intended as a low-level API, facilitating code reuse between this class and its mapred + * counterpart. It also of use to external tools that need to build a MapReduce job that interacts + * with HBase but want fine-grained control over the jars shipped to the cluster. *

    * @param conf The Configuration object to extend with dependencies. * @see org.apache.hadoop.hbase.mapred.TableMapReduceUtil @@ -811,35 +709,35 @@ public static void setScannerCaching(Job job, int batchSize) { public static void addHBaseDependencyJars(Configuration conf) throws IOException { addDependencyJarsForClasses(conf, // explicitly pull a class from each module - org.apache.hadoop.hbase.HConstants.class, // hbase-common + org.apache.hadoop.hbase.HConstants.class, // hbase-common org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class, // hbase-protocol org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, // hbase-protocol-shaded - org.apache.hadoop.hbase.client.Put.class, // hbase-client - org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server - org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat - org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat - org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce - org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics - org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api - org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication - org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http - org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure - org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper + org.apache.hadoop.hbase.client.Put.class, // hbase-client + org.apache.hadoop.hbase.ipc.RpcServer.class, // hbase-server + org.apache.hadoop.hbase.CompatibilityFactory.class, // hbase-hadoop-compat + org.apache.hadoop.hbase.mapreduce.JobUtil.class, // hbase-hadoop2-compat + org.apache.hadoop.hbase.mapreduce.TableMapper.class, // hbase-mapreduce + org.apache.hadoop.hbase.metrics.impl.FastLongHistogram.class, // hbase-metrics + org.apache.hadoop.hbase.metrics.Snapshot.class, // hbase-metrics-api + org.apache.hadoop.hbase.replication.ReplicationUtils.class, // hbase-replication + org.apache.hadoop.hbase.http.HttpServer.class, // hbase-http + org.apache.hadoop.hbase.procedure2.Procedure.class, // hbase-procedure + org.apache.hadoop.hbase.zookeeper.ZKWatcher.class, // hbase-zookeeper org.apache.hbase.thirdparty.com.google.common.collect.Lists.class, // hb-shaded-miscellaneous org.apache.hbase.thirdparty.com.google.gson.GsonBuilder.class, // hbase-shaded-gson org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations.class, // hb-sh-protobuf - org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty - org.apache.zookeeper.ZooKeeper.class, // zookeeper - com.google.protobuf.Message.class, // protobuf - com.codahale.metrics.MetricRegistry.class, // metrics-core - org.apache.commons.lang3.ArrayUtils.class, // commons-lang - io.opentelemetry.api.trace.Span.class, // opentelemetry-api + org.apache.hbase.thirdparty.io.netty.channel.Channel.class, // hbase-shaded-netty + org.apache.zookeeper.ZooKeeper.class, // zookeeper + com.google.protobuf.Message.class, // protobuf + com.codahale.metrics.MetricRegistry.class, // metrics-core + org.apache.commons.lang3.ArrayUtils.class, // commons-lang + io.opentelemetry.api.trace.Span.class, // opentelemetry-api io.opentelemetry.semconv.trace.attributes.SemanticAttributes.class); // opentelemetry-semconv } /** - * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. - * Also exposed to shell scripts via `bin/hbase mapredcp`. 
+ * Returns a classpath string built from the content of the "tmpjars" value in {@code conf}. Also + * exposed to shell scripts via `bin/hbase mapredcp`. */ public static String buildDependencyClasspath(Configuration conf) { if (conf == null) { @@ -861,63 +759,52 @@ public static String buildDependencyClasspath(Configuration conf) { } /** - * Add the HBase dependency jars as well as jars for any of the configured - * job classes to the job configuration, so that JobClient will ship them - * to the cluster and add them to the DistributedCache. + * Add the HBase dependency jars as well as jars for any of the configured job classes to the job + * configuration, so that JobClient will ship them to the cluster and add them to the + * DistributedCache. */ public static void addDependencyJars(Job job) throws IOException { addHBaseDependencyJars(job.getConfiguration()); try { addDependencyJarsForClasses(job.getConfiguration(), - // when making changes here, consider also mapred.TableMapReduceUtil - // pull job classes - job.getMapOutputKeyClass(), - job.getMapOutputValueClass(), - job.getInputFormatClass(), - job.getOutputKeyClass(), - job.getOutputValueClass(), - job.getOutputFormatClass(), - job.getPartitionerClass(), - job.getCombinerClass()); + // when making changes here, consider also mapred.TableMapReduceUtil + // pull job classes + job.getMapOutputKeyClass(), job.getMapOutputValueClass(), job.getInputFormatClass(), + job.getOutputKeyClass(), job.getOutputValueClass(), job.getOutputFormatClass(), + job.getPartitionerClass(), job.getCombinerClass()); } catch (ClassNotFoundException e) { throw new IOException(e); } } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. * @deprecated since 1.3.0 and will be removed in 3.0.0. Use {@link #addDependencyJars(Job)} - * instead. + * instead. * @see #addDependencyJars(Job) * @see HBASE-8386 */ @Deprecated - public static void addDependencyJars(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { LOG.warn("The addDependencyJars(Configuration, Class...) method has been deprecated since it" - + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + - "instead. See HBASE-8386 for more details."); + + " is easy to use incorrectly. Most users should rely on addDependencyJars(Job) " + + "instead. See HBASE-8386 for more details."); addDependencyJarsForClasses(conf, classes); } /** - * Add the jars containing the given classes to the job's configuration - * such that JobClient will ship them to the cluster and add them to - * the DistributedCache. - * - * N.B. that this method at most adds one jar per class given. If there is more than one - * jar available containing a class with the same name as a given class, we don't define - * which of those jars might be chosen. - * + * Add the jars containing the given classes to the job's configuration such that JobClient will + * ship them to the cluster and add them to the DistributedCache. N.B. that this method at most + * adds one jar per class given. If there is more than one jar available containing a class with + * the same name as a given class, we don't define which of those jars might be chosen. 
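For illustration, a short sketch of the usual pattern for the dependency-jar helpers above: populate "tmpjars" with addDependencyJars(Job), then render the same list as a classpath string with buildDependencyClasspath, which is what `bin/hbase mapredcp` exposes. The class name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "dependency-jars-example");

        // Ship the HBase jars plus the jars of the job's own key/value/format
        // classes to the cluster via the distributed cache ("tmpjars").
        TableMapReduceUtil.addDependencyJars(job);

        // The same "tmpjars" value rendered as a local classpath string.
        String classpath = TableMapReduceUtil.buildDependencyClasspath(job.getConfiguration());
        System.out.println(classpath);
      }
    }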
* @param conf The Hadoop Configuration to modify * @param classes will add just those dependencies needed to find the given classes * @throws IOException if an underlying library call fails. */ @InterfaceAudience.Private - public static void addDependencyJarsForClasses(Configuration conf, - Class... classes) throws IOException { + public static void addDependencyJarsForClasses(Configuration conf, Class... classes) + throws IOException { FileSystem localFs = FileSystem.getLocal(conf); Set jars = new HashSet<>(); @@ -934,13 +821,11 @@ public static void addDependencyJarsForClasses(Configuration conf, Path path = findOrCreateJar(clazz, localFs, packagedClasses); if (path == null) { - LOG.warn("Could not find jar for class " + clazz + - " in order to ship it to the cluster."); + LOG.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster."); continue; } if (!localFs.exists(path)) { - LOG.warn("Could not validate jar file " + path + " for class " - + clazz); + LOG.warn("Could not validate jar file " + path + " for class " + clazz); continue; } jars.add(path.toString()); @@ -951,12 +836,11 @@ public static void addDependencyJarsForClasses(Configuration conf, } /** - * Finds the Jar for a class or creates it if it doesn't exist. If the class is in - * a directory in the classpath, it creates a Jar on the fly with the - * contents of the directory and returns the path to that Jar. If a Jar is - * created, it is created in the system temporary directory. Otherwise, - * returns an existing jar that contains a class of the same name. Maintains - * a mapping from jar contents to the tmp jar created. + * Finds the Jar for a class or creates it if it doesn't exist. If the class is in a directory in + * the classpath, it creates a Jar on the fly with the contents of the directory and returns the + * path to that Jar. If a Jar is created, it is created in the system temporary directory. + * Otherwise, returns an existing jar that contains a class of the same name. Maintains a mapping + * from jar contents to the tmp jar created. * @param my_class the class to find. * @param fs the FileSystem with which to qualify the returned path. * @param packagedClasses a map of class name to path. @@ -964,8 +848,7 @@ public static void addDependencyJarsForClasses(Configuration conf, * @throws IOException */ private static Path findOrCreateJar(Class my_class, FileSystem fs, - Map packagedClasses) - throws IOException { + Map packagedClasses) throws IOException { // attempt to locate an existing jar for the class. String jar = findContainingJar(my_class, packagedClasses); if (null == jar || jar.isEmpty()) { @@ -982,12 +865,13 @@ private static Path findOrCreateJar(Class my_class, FileSystem fs, } /** - * Add entries to packagedClasses corresponding to class files - * contained in jar. + * Add entries to packagedClasses corresponding to class files contained in + * jar. * @param jar The jar who's content to list. * @param packagedClasses map[class -> jar] */ - private static void updateMap(String jar, Map packagedClasses) throws IOException { + private static void updateMap(String jar, Map packagedClasses) + throws IOException { if (null == jar || jar.isEmpty()) { return; } @@ -1006,10 +890,9 @@ private static void updateMap(String jar, Map packagedClasses) t } /** - * Find a jar that contains a class of the same name, if any. It will return - * a jar file, even if that is not the first thing on the class path that - * has a class with the same name. 
Looks first on the classpath and then in - * the packagedClasses map. + * Find a jar that contains a class of the same name, if any. It will return a jar file, even if + * that is not the first thing on the class path that has a class with the same name. Looks first + * on the classpath and then in the packagedClasses map. * @param my_class the class to find. * @return a jar file that contains the class, or null. * @throws IOException @@ -1048,9 +931,8 @@ private static String findContainingJar(Class my_class, Map p } /** - * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job - * configuration contexts (HBASE-8140) and also for testing on MRv2. - * check if we have HADOOP-9426. + * Invoke 'getJar' on a custom JarFinder implementation. Useful for some job configuration + * contexts (HBASE-8140) and also for testing on MRv2. check if we have HADOOP-9426. * @param my_class the class to find. * @return a jar file that contains the class, or null. */ @@ -1068,7 +950,7 @@ private static String getJar(Class my_class) { private static int getRegionCount(Configuration conf, TableName tableName) throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); - RegionLocator locator = conn.getRegionLocator(tableName)) { + RegionLocator locator = conn.getRegionLocator(tableName)) { return locator.getAllRegionLocations().size(); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java index 3a63bc60ab25..8ff8c240a6d3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapper.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,21 +17,19 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the base Mapper class to add the required input key - * and value classes. - * - * @param The type of the key. - * @param The type of the value. + * Extends the base Mapper class to add the required input key and value classes. + * @param The type of the key. + * @param The type of the value. * @see org.apache.hadoop.mapreduce.Mapper */ @InterfaceAudience.Public public abstract class TableMapper -extends Mapper { + extends Mapper { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java index e02ba5f54357..a59659534913 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputCommitter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
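For illustration, a minimal TableMapper subclass following the contract described above; the input key and value types are fixed by the base class, and only the output types and the hypothetical class name are chosen here:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.io.LongWritable;

    /** Emits the number of cells in each row it is handed (illustrative only). */
    public class CellCountMapper extends TableMapper<ImmutableBytesWritable, LongWritable> {
      private final LongWritable count = new LongWritable();

      @Override
      protected void map(ImmutableBytesWritable row, Result value, Context context)
          throws IOException, InterruptedException {
        // Result.rawCells() gives every cell the scan returned for this row.
        count.set(value.rawCells().length);
        context.write(row, count);
      }
    }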
See the NOTICE file * distributed with this work for additional information @@ -19,11 +18,10 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.mapreduce.JobContext; import org.apache.hadoop.mapreduce.OutputCommitter; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** * Small committer class that does not do anything. @@ -60,8 +58,6 @@ public boolean isRecoverySupported() { return true; } - public void recoverTask(TaskAttemptContext taskContext) - throws IOException - { + public void recoverTask(TaskAttemptContext taskContext) throws IOException { } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java index 8da8d83d9231..a0121cd90bc8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableOutputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,13 +42,11 @@ import org.slf4j.LoggerFactory; /** - * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored - * while the output value must be either a {@link Put} or a - * {@link Delete} instance. + * Convert Map/Reduce output and write it to an HBase table. The KEY is ignored while the output + * value must be either a {@link Put} or a {@link Delete} instance. */ @InterfaceAudience.Public -public class TableOutputFormat extends OutputFormat -implements Configurable { +public class TableOutputFormat extends OutputFormat implements Configurable { private static final Logger LOG = LoggerFactory.getLogger(TableOutputFormat.class); @@ -57,20 +54,19 @@ public class TableOutputFormat extends OutputFormat public static final String OUTPUT_TABLE = "hbase.mapred.outputtable"; /** - * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. - * For keys matching this prefix, the prefix is stripped, and the value is set in the - * configuration with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" - * would be set in the configuration as "key1 = value1". Use this to set properties - * which should only be applied to the {@code TableOutputFormat} configuration and not the - * input configuration. + * Prefix for configuration property overrides to apply in {@link #setConf(Configuration)}. For + * keys matching this prefix, the prefix is stripped, and the value is set in the configuration + * with the resulting key, ie. the entry "hbase.mapred.output.key1 = value1" would be set in the + * configuration as "key1 = value1". Use this to set properties which should only be applied to + * the {@code TableOutputFormat} configuration and not the input configuration. */ public static final String OUTPUT_CONF_PREFIX = "hbase.mapred.output."; /** - * Optional job parameter to specify a peer cluster. - * Used specifying remote cluster when copying between hbase clusters (the - * source is picked up from hbase-site.xml). - * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, Class, String, String, String) + * Optional job parameter to specify a peer cluster. 
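For illustration, a sketch of the OUTPUT_CONF_PREFIX mechanism documented above: a prefixed key is stripped in setConf() and applied only to the output-side configuration. The class and table names are hypothetical; hbase.client.write.buffer is an ordinary client setting used here as the overridden key:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableOutputFormat;
    import org.apache.hadoop.mapreduce.Job;

    public class OutputOverrideExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "output-override");

        // Required: the table TableOutputFormat writes to.
        job.getConfiguration().set(TableOutputFormat.OUTPUT_TABLE, "dest_table");

        // Only applied on the output side: TableOutputFormat.setConf() strips the
        // prefix, so the writer sees hbase.client.write.buffer=8388608 while the
        // input-side configuration is left untouched.
        job.getConfiguration().set(
          TableOutputFormat.OUTPUT_CONF_PREFIX + "hbase.client.write.buffer", "8388608");

        job.setOutputFormatClass(TableOutputFormat.class);
      }
    }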
Used specifying remote cluster when copying + * between hbase clusters (the source is picked up from hbase-site.xml). + * @see TableMapReduceUtil#initTableReducerJob(String, Class, org.apache.hadoop.mapreduce.Job, + * Class, String, String, String) */ public static final String QUORUM_ADDRESS = OUTPUT_CONF_PREFIX + "quorum"; @@ -78,11 +74,9 @@ public class TableOutputFormat extends OutputFormat public static final String QUORUM_PORT = OUTPUT_CONF_PREFIX + "quorum.port"; /** Optional specification of the rs class name of the peer cluster */ - public static final String - REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; + public static final String REGION_SERVER_CLASS = OUTPUT_CONF_PREFIX + "rs.class"; /** Optional specification of the rs impl name of the peer cluster */ - public static final String - REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; + public static final String REGION_SERVER_IMPL = OUTPUT_CONF_PREFIX + "rs.impl"; /** The configuration. */ private Configuration conf = null; @@ -90,26 +84,24 @@ public class TableOutputFormat extends OutputFormat /** * Writes the reducer output to an HBase table. */ - protected class TableRecordWriter - extends RecordWriter { + protected class TableRecordWriter extends RecordWriter { private Connection connection; private BufferedMutator mutator; /** * @throws IOException - * */ public TableRecordWriter() throws IOException { String tableName = conf.get(OUTPUT_TABLE); this.connection = ConnectionFactory.createConnection(conf); this.mutator = connection.getBufferedMutator(TableName.valueOf(tableName)); - LOG.info("Created table instance for " + tableName); + LOG.info("Created table instance for " + tableName); } + /** * Closes the writer, in this case flush table commits. - * - * @param context The context. + * @param context The context. * @throws IOException When closing the writer fails. * @see RecordWriter#close(TaskAttemptContext) */ @@ -128,15 +120,13 @@ public void close(TaskAttemptContext context) throws IOException { /** * Writes a key/value pair into the table. - * - * @param key The key. - * @param value The value. + * @param key The key. + * @param value The value. * @throws IOException When writing fails. * @see RecordWriter#write(Object, Object) */ @Override - public void write(KEY key, Mutation value) - throws IOException { + public void write(KEY key, Mutation value) throws IOException { if (!(value instanceof Put) && !(value instanceof Delete)) { throw new IOException("Pass a Delete or a Put"); } @@ -145,14 +135,11 @@ public void write(KEY key, Mutation value) } /** - * Creates a new record writer. - * - * Be aware that the baseline javadoc gives the impression that there is a single - * {@link RecordWriter} per job but in HBase, it is more natural if we give you a new + * Creates a new record writer. Be aware that the baseline javadoc gives the impression that there + * is a single {@link RecordWriter} per job but in HBase, it is more natural if we give you a new * RecordWriter per call of this method. You must close the returned RecordWriter when done. * Failure to do so will drop writes. - * - * @param context The current task context. + * @param context The current task context. * @return The newly created writer instance. * @throws IOException When creating the writer fails. * @throws InterruptedException When the job is cancelled. @@ -165,8 +152,7 @@ public RecordWriter getRecordWriter(TaskAttemptContext context) /** * Checks if the output table exists and is enabled. - * - * @param context The current context. 
+ * @param context The current context. * @throws IOException When the check fails. * @throws InterruptedException When the job is aborted. * @see OutputFormat#checkOutputSpecs(JobContext) @@ -179,24 +165,23 @@ public void checkOutputSpecs(JobContext context) throws IOException, Interrupted } try (Connection connection = ConnectionFactory.createConnection(hConf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableName tableName = TableName.valueOf(hConf.get(OUTPUT_TABLE)); if (!admin.tableExists(tableName)) { - throw new TableNotFoundException("Can't write, table does not exist:" + - tableName.getNameAsString()); + throw new TableNotFoundException( + "Can't write, table does not exist:" + tableName.getNameAsString()); } if (!admin.isTableEnabled(tableName)) { - throw new TableNotEnabledException("Can't write, table is not enabled: " + - tableName.getNameAsString()); + throw new TableNotEnabledException( + "Can't write, table is not enabled: " + tableName.getNameAsString()); } } } /** * Returns the output committer. - * - * @param context The current context. + * @param context The current context. * @return The committer. * @throws IOException When creating the committer fails. * @throws InterruptedException When the job is aborted. @@ -204,7 +189,7 @@ public void checkOutputSpecs(JobContext context) throws IOException, Interrupted */ @Override public OutputCommitter getOutputCommitter(TaskAttemptContext context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { return new TableOutputCommitter(); } @@ -216,7 +201,7 @@ public Configuration getConf() { @Override public void setConf(Configuration otherConf) { String tableName = otherConf.get(OUTPUT_TABLE); - if(tableName == null || tableName.length() <= 0) { + if (tableName == null || tableName.length() <= 0) { throw new IllegalArgumentException("Must specify table name"); } @@ -234,7 +219,7 @@ public void setConf(Configuration otherConf) { if (zkClientPort != 0) { this.conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort); } - } catch(IOException e) { + } catch (IOException e) { LOG.error(e.toString(), e); throw new RuntimeException(e); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java index 512c22f9cc9c..997ea0775097 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,8 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.client.Table; @@ -28,21 +25,19 @@ import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; +import org.apache.yetus.audience.InterfaceAudience; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. 
*/ @InterfaceAudience.Public -public class TableRecordReader -extends RecordReader { +public class TableRecordReader extends RecordReader { private TableRecordReaderImpl recordReaderImpl = new TableRecordReaderImpl(); /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -58,8 +53,7 @@ public void setTable(Table table) { /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.recordReaderImpl.setScan(scan); @@ -67,7 +61,6 @@ public void setScan(Scan scan) { /** * Closes the split. - * * @see org.apache.hadoop.mapreduce.RecordReader#close() */ @Override @@ -77,21 +70,18 @@ public void close() { /** * Returns the current key. - * * @return The current key. * @throws IOException * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#getCurrentKey() */ @Override - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return this.recordReaderImpl.getCurrentKey(); } /** * Returns the current value. - * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -104,25 +94,21 @@ public Result getCurrentValue() throws IOException, InterruptedException { /** * Initializes the reader. - * - * @param inputsplit The split to work with. - * @param context The current task context. + * @param inputsplit The split to work with. + * @param context The current task context. * @throws IOException When setting up the reader fails. * @throws InterruptedException When the job is aborted. * @see org.apache.hadoop.mapreduce.RecordReader#initialize( - * org.apache.hadoop.mapreduce.InputSplit, - * org.apache.hadoop.mapreduce.TaskAttemptContext) + * org.apache.hadoop.mapreduce.InputSplit, org.apache.hadoop.mapreduce.TaskAttemptContext) */ @Override - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { this.recordReaderImpl.initialize(inputsplit, context); } /** * Positions the record reader to the next record. - * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -135,7 +121,6 @@ public boolean nextKeyValue() throws IOException, InterruptedException { /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. 
* @see org.apache.hadoop.mapreduce.RecordReader#getProgress() */ diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java index 9c58a4c1cc43..42d090c31524 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableRecordReaderImpl.java @@ -39,13 +39,11 @@ import org.slf4j.LoggerFactory; /** - * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) - * pairs. + * Iterate over an HBase table data, return (ImmutableBytesWritable, Result) pairs. */ @InterfaceAudience.Public public class TableRecordReaderImpl { - public static final String LOG_PER_ROW_COUNT - = "hbase.mapreduce.log.scanner.rowcount"; + public static final String LOG_PER_ROW_COUNT = "hbase.mapreduce.log.scanner.rowcount"; private static final Logger LOG = LoggerFactory.getLogger(TableRecordReaderImpl.class); @@ -70,8 +68,7 @@ public class TableRecordReaderImpl { /** * Restart from survivable exceptions by creating a new scanner. - * - * @param firstRow The first row to start at. + * @param firstRow The first row to start at. * @throws IOException When restarting fails. */ public void restart(byte[] firstRow) throws IOException { @@ -97,18 +94,17 @@ public void restart(byte[] firstRow) throws IOException { } /** - * In new mapreduce APIs, TaskAttemptContext has two getCounter methods - * Check if getCounter(String, String) method is available. + * In new mapreduce APIs, TaskAttemptContext has two getCounter methods Check if + * getCounter(String, String) method is available. * @return The getCounter method or null if not available. * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 */ @Deprecated protected static Method retrieveGetCounterWithStringsParams(TaskAttemptContext context) - throws IOException { + throws IOException { Method m = null; try { - m = context.getClass().getMethod("getCounter", - new Class [] {String.class, String.class}); + m = context.getClass().getMethod("getCounter", new Class[] { String.class, String.class }); } catch (SecurityException e) { throw new IOException("Failed test for getCounter", e); } catch (NoSuchMethodException e) { @@ -119,21 +115,19 @@ protected static Method retrieveGetCounterWithStringsParams(TaskAttemptContext c /** * Sets the HBase table. - * - * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan. + * @param htable The {@link org.apache.hadoop.hbase.HTableDescriptor} to scan. */ public void setHTable(Table htable) { Configuration conf = htable.getConfiguration(); logScannerActivity = conf.getBoolean( - "hbase.client.log.scanner.activity" /*ScannerCallable.LOG_SCANNER_ACTIVITY*/, false); + "hbase.client.log.scanner.activity" /* ScannerCallable.LOG_SCANNER_ACTIVITY */, false); logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100); this.htable = htable; } /** * Sets the scan defining the actual details like columns etc. - * - * @param scan The scan to set. + * @param scan The scan to set. */ public void setScan(Scan scan) { this.scan = scan; @@ -142,9 +136,8 @@ public void setScan(Scan scan) { /** * Build the scanner. Not done in constructor to allow for extension. 
*/ - public void initialize(InputSplit inputsplit, - TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit inputsplit, TaskAttemptContext context) + throws IOException, InterruptedException { if (context != null) { this.context = context; } @@ -153,8 +146,6 @@ public void initialize(InputSplit inputsplit, /** * Closes the split. - * - * */ public void close() { if (this.scanner != null) { @@ -169,18 +160,15 @@ public void close() { /** * Returns the current key. - * * @return The current key. * @throws InterruptedException When the job is aborted. */ - public ImmutableBytesWritable getCurrentKey() throws IOException, - InterruptedException { + public ImmutableBytesWritable getCurrentKey() throws IOException, InterruptedException { return key; } /** * Returns the current value. - * * @return The current value. * @throws IOException When the value is faulty. * @throws InterruptedException When the job is aborted. @@ -189,10 +177,8 @@ public Result getCurrentValue() throws IOException, InterruptedException { return value; } - /** * Positions the record reader to the next record. - * * @return true if there was another record. * @throws IOException When reading the record failed. * @throws InterruptedException When the job was aborted. @@ -211,7 +197,7 @@ public boolean nextKeyValue() throws IOException, InterruptedException { numStale++; } if (logScannerActivity) { - rowcount ++; + rowcount++; if (rowcount >= logPerRowCount) { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); @@ -229,16 +215,16 @@ public boolean nextKeyValue() throws IOException, InterruptedException { // the scanner, if the second call fails, it will be rethrown LOG.info("recovered from " + StringUtils.stringifyException(e)); if (lastSuccessfulRow == null) { - LOG.warn("We are restarting the first next() invocation," + - " if your mapper has restarted a few other times like this" + - " then you should consider killing this job and investigate" + - " why it's taking so long."); + LOG.warn("We are restarting the first next() invocation," + + " if your mapper has restarted a few other times like this" + + " then you should consider killing this job and investigate" + + " why it's taking so long."); } if (lastSuccessfulRow == null) { restart(scan.getStartRow()); } else { restart(lastSuccessfulRow); - scanner.next(); // skip presumed already mapped row + scanner.next(); // skip presumed already mapped row } value = scanner.next(); if (value != null && value.isStale()) { @@ -268,8 +254,8 @@ public boolean nextKeyValue() throws IOException, InterruptedException { long now = EnvironmentEdgeManager.currentTime(); LOG.info("Mapper took {}ms to process {} rows", (now - timestamp), rowcount); LOG.info(ioe.toString(), ioe); - String lastRow = lastSuccessfulRow == null ? - "null" : Bytes.toStringBinary(lastSuccessfulRow); + String lastRow = + lastSuccessfulRow == null ? "null" : Bytes.toStringBinary(lastSuccessfulRow); LOG.info("lastSuccessfulRow=" + lastRow); } throw ioe; @@ -277,10 +263,9 @@ public boolean nextKeyValue() throws IOException, InterruptedException { } /** - * If hbase runs on new version of mapreduce, RecordReader has access to - * counters thus can update counters based on scanMetrics. - * If hbase runs on old version of mapreduce, it won't be able to get - * access to counters and TableRecorderReader can't update counter values. 
+ * If hbase runs on new version of mapreduce, RecordReader has access to counters thus can update + * counters based on scanMetrics. If hbase runs on old version of mapreduce, it won't be able to + * get access to counters and TableRecorderReader can't update counter values. */ private void updateCounters() { ScanMetrics scanMetrics = scanner.getScanMetrics(); @@ -292,8 +277,8 @@ private void updateCounters() { } /** - * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 - * Use {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. + * @deprecated since 2.4.0 and 2.3.2, will be removed in 4.0.0 Use + * {@link #updateCounters(ScanMetrics, long, TaskAttemptContext, long)} instead. */ @Deprecated protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts, @@ -308,29 +293,28 @@ protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRes return; } - for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); - if (counter != null) { - counter.increment(entry.getValue()); - } + for (Map.Entry entry : scanMetrics.getMetricsMap().entrySet()) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey()); + if (counter != null) { + counter.increment(entry.getValue()); } - if (numScannerRestarts != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); - if (counter != null) { - counter.increment(numScannerRestarts); - } + } + if (numScannerRestarts != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCANNER_RESTARTS"); + if (counter != null) { + counter.increment(numScannerRestarts); } - if (numStale != 0L) { - Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); - if (counter != null) { - counter.increment(numStale); - } + } + if (numStale != 0L) { + Counter counter = context.getCounter(HBASE_COUNTER_GROUP_NAME, "NUM_SCAN_RESULTS_STALE"); + if (counter != null) { + counter.increment(numStale); } + } } /** * The current progress of the record reader through its data. - * * @return A number between 0.0 and 1.0, the fraction of the data read. */ public float getProgress() { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java index 07e44cbc28be..6c249abe3d11 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableReducer.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,28 +17,26 @@ */ package org.apache.hadoop.hbase.mapreduce; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Mutation; import org.apache.hadoop.mapreduce.Reducer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Extends the basic Reducer class to add the required key and - * value input/output classes. 
While the input key and value as well as the - * output key can be anything handed in from the previous map phase the output - * value must be either a {@link org.apache.hadoop.hbase.client.Put Put} - * or a {@link org.apache.hadoop.hbase.client.Delete Delete} instance when - * using the {@link TableOutputFormat} class. + * Extends the basic Reducer class to add the required key and value input/output + * classes. While the input key and value as well as the output key can be anything handed in from + * the previous map phase the output value must be either a + * {@link org.apache.hadoop.hbase.client.Put Put} or a {@link org.apache.hadoop.hbase.client.Delete + * Delete} instance when using the {@link TableOutputFormat} class. *
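For illustration, a minimal TableReducer subclass following the contract described above, where the emitted value must be a Put or Delete when TableOutputFormat is used; the class name, column family and qualifier are hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableReducer;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.io.LongWritable;

    /** Sums per-row counts and persists the total as a Put (illustrative only). */
    public class CellCountReducer
        extends TableReducer<ImmutableBytesWritable, LongWritable, ImmutableBytesWritable> {
      @Override
      protected void reduce(ImmutableBytesWritable row, Iterable<LongWritable> counts,
          Context context) throws IOException, InterruptedException {
        long total = 0;
        for (LongWritable c : counts) {
          total += c.get();
        }
        // The output value must be a Put or Delete; TableOutputFormat rejects anything else.
        Put put = new Put(row.copyBytes());
        put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("cellCount"), Bytes.toBytes(total));
        context.write(row, put);
      }
    }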

    - * This class is extended by {@link IdentityTableReducer} but can also be - * subclassed to implement similar features or any custom code needed. It has - * the advantage to enforce the output value to a specific basic type. - * - * @param The type of the input key. - * @param The type of the input value. - * @param The type of the output key. + * This class is extended by {@link IdentityTableReducer} but can also be subclassed to implement + * similar features or any custom code needed. It has the advantage to enforce the output value to a + * specific basic type. + * @param The type of the input key. + * @param The type of the input value. + * @param The type of the output key. * @see org.apache.hadoop.mapreduce.Reducer */ @InterfaceAudience.Public public abstract class TableReducer -extends Reducer { + extends Reducer { } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java index 6fd0b6e3f116..8641d25d97a9 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import java.io.DataInput; @@ -23,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; @@ -43,40 +41,41 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job - * bypasses HBase servers, and directly accesses the underlying files (hfile, recovered edits, - * wals, etc) directly to provide maximum performance. The snapshot is not required to be - * restored to the live cluster or cloned. This also allows to run the mapreduce job from an - * online or offline hbase cluster. The snapshot files can be exported by using the - * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, - * and this InputFormat can be used to run the mapreduce job directly over the snapshot files. - * The snapshot should not be deleted while there are jobs reading from snapshot files. + * TableSnapshotInputFormat allows a MapReduce job to run over a table snapshot. The job bypasses + * HBase servers, and directly accesses the underlying files (hfile, recovered edits, wals, etc) + * directly to provide maximum performance. The snapshot is not required to be restored to the live + * cluster or cloned. This also allows to run the mapreduce job from an online or offline hbase + * cluster. The snapshot files can be exported by using the + * {@link org.apache.hadoop.hbase.snapshot.ExportSnapshot} tool, to a pure-hdfs cluster, and this + * InputFormat can be used to run the mapreduce job directly over the snapshot files. The snapshot + * should not be deleted while there are jobs reading from snapshot files. *

    * Usage is similar to TableInputFormat, and * {@link TableMapReduceUtil#initTableSnapshotMapperJob(String, Scan, Class, Class, Class, Job, boolean, Path)} * can be used to configure the job. - *

    {@code
    - * Job job = new Job(conf);
    - * Scan scan = new Scan();
    - * TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
    - *      scan, MyTableMapper.class, MyMapKeyOutput.class,
    - *      MyMapOutputValueWritable.class, job, true);
    + * 
    + * 
    + * {
    + *   @code
    + *   Job job = new Job(conf);
    + *   Scan scan = new Scan();
    + *   TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, MyTableMapper.class,
    + *     MyMapKeyOutput.class, MyMapOutputValueWritable.class, job, true);
      * }
      * 
    *

    - * Internally, this input format restores the snapshot into the given tmp directory. By default, - * and similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you - * can run N mapper tasks per every region, in which case the region key range will be split to - * N sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading - * from each RecordReader. An internal RegionScanner is used to execute the + * Internally, this input format restores the snapshot into the given tmp directory. By default, and + * similar to {@link TableInputFormat} an InputSplit is created per region, but optionally you can + * run N mapper tasks per every region, in which case the region key range will be split to N + * sub-ranges and an InputSplit will be created per sub-range. The region is opened for reading from + * each RecordReader. An internal RegionScanner is used to execute the * {@link org.apache.hadoop.hbase.CellScanner} obtained from the user. *

    * HBase owns all the data and snapshot files on the filesystem. Only the 'hbase' user can read from - * snapshot files and data files. - * To read from snapshot files directly from the file system, the user who is running the MR job - * must have sufficient permissions to access snapshot and reference files. - * This means that to run mapreduce over snapshot files, the MR job has to be run as the HBase - * user or the user must have group or other privileges in the filesystem (See HBASE-8369). + * snapshot files and data files. To read from snapshot files directly from the file system, the + * user who is running the MR job must have sufficient permissions to access snapshot and reference + * files. This means that to run mapreduce over snapshot files, the MR job has to be run as the + * HBase user or the user must have group or other privileges in the filesystem (See HBASE-8369). * Note that, given other users access to read from snapshot/data files will completely circumvent * the access control enforced by HBase. * @see org.apache.hadoop.hbase.client.TableSnapshotScanner @@ -123,8 +122,7 @@ public void readFields(DataInput in) throws IOException { } /** - * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 - * Use {@link #getRegion()} + * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use {@link #getRegion()} */ @Deprecated public HRegionInfo getRegionInfo() { @@ -141,19 +139,17 @@ TableSnapshotInputFormatImpl.InputSplit getDelegate() { } @InterfaceAudience.Private - static class TableSnapshotRegionRecordReader extends - RecordReader { + static class TableSnapshotRegionRecordReader + extends RecordReader { private TableSnapshotInputFormatImpl.RecordReader delegate = - new TableSnapshotInputFormatImpl.RecordReader(); + new TableSnapshotInputFormatImpl.RecordReader(); private TaskAttemptContext context; @Override - public void initialize(InputSplit split, TaskAttemptContext context) throws IOException, - InterruptedException { + public void initialize(InputSplit split, TaskAttemptContext context) + throws IOException, InterruptedException { this.context = context; - delegate.initialize( - ((TableSnapshotRegionSplit) split).delegate, - context.getConfiguration()); + delegate.initialize(((TableSnapshotRegionSplit) split).delegate, context.getConfiguration()); } @Override @@ -190,16 +186,16 @@ public void close() throws IOException { } @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext context) throws IOException { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext context) throws IOException { return new TableSnapshotRegionRecordReader(); } @Override public List getSplits(JobContext job) throws IOException, InterruptedException { List results = new ArrayList<>(); - for (TableSnapshotInputFormatImpl.InputSplit split : - TableSnapshotInputFormatImpl.getSplits(job.getConfiguration())) { + for (TableSnapshotInputFormatImpl.InputSplit split : TableSnapshotInputFormatImpl + .getSplits(job.getConfiguration())) { results.add(new TableSnapshotRegionSplit(split)); } return results; @@ -209,13 +205,12 @@ public List getSplits(JobContext job) throws IOException, Interrupte * Configures the job to use TableSnapshotInputFormat to read from a snapshot. * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. 
Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should have + * write permissions to this directory, and this should not be a subdirectory of rootdir. + * After the job is finished, restoreDir can be deleted. * @throws IOException if an error occurs */ - public static void setInput(Job job, String snapshotName, Path restoreDir) - throws IOException { + public static void setInput(Job job, String snapshotName, Path restoreDir) throws IOException { TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir); } @@ -223,21 +218,21 @@ public static void setInput(Job job, String snapshotName, Path restoreDir) * Configures the job to use TableSnapshotInputFormat to read from a snapshot. * @param job the job to configure * @param snapshotName the name of the snapshot to read from - * @param restoreDir a temporary directory to restore the snapshot into. Current user should - * have write permissions to this directory, and this should not be a subdirectory of rootdir. - * After the job is finished, restoreDir can be deleted. + * @param restoreDir a temporary directory to restore the snapshot into. Current user should have + * write permissions to this directory, and this should not be a subdirectory of rootdir. + * After the job is finished, restoreDir can be deleted. * @param splitAlgo split algorithm to generate splits from region * @param numSplitsPerRegion how many input splits to generate per one region * @throws IOException if an error occurs */ - public static void setInput(Job job, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { - TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir, - splitAlgo, numSplitsPerRegion); - } + public static void setInput(Job job, String snapshotName, Path restoreDir, + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { + TableSnapshotInputFormatImpl.setInput(job.getConfiguration(), snapshotName, restoreDir, + splitAlgo, numSplitsPerRegion); + } /** - * clean restore directory after snapshot scan job + * clean restore directory after snapshot scan job * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java index e106b9d41df6..453fb1c916e8 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
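For illustration, a sketch of the snapshot input setup documented above, using the setInput overload that takes a split algorithm and then cleaning the restore directory once the job is done. The snapshot name, restore path and class name are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat;
    import org.apache.hadoop.hbase.util.RegionSplitter;
    import org.apache.hadoop.mapreduce.Job;

    public class SnapshotScanSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-scan");

        // restoreDir must be writable by the submitting user and must not sit
        // under the HBase root directory; it can be deleted after the job ends.
        Path restoreDir = new Path("/tmp/restore-my_snapshot");

        // Two input splits per region, cut with the uniform byte-split algorithm.
        TableSnapshotInputFormat.setInput(job, "my_snapshot", restoreDir,
          new RegionSplitter.UniformSplit(), 2);

        // ... configure mapper/output and run the job here ...

        // Once the job has finished, remove the restored snapshot layout.
        TableSnapshotInputFormat.cleanRestoreDir(job, "my_snapshot");
      }
    }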
*/ - package org.apache.hadoop.hbase.mapreduce; import java.io.ByteArrayOutputStream; @@ -80,39 +79,37 @@ public class TableSnapshotInputFormatImpl { /** See {@link #getBestLocations(Configuration, HDFSBlocksDistribution, int)} */ private static final String LOCALITY_CUTOFF_MULTIPLIER = - "hbase.tablesnapshotinputformat.locality.cutoff.multiplier"; + "hbase.tablesnapshotinputformat.locality.cutoff.multiplier"; private static final float DEFAULT_LOCALITY_CUTOFF_MULTIPLIER = 0.8f; /** - * For MapReduce jobs running multiple mappers per region, determines - * what split algorithm we should be using to find split points for scanners. + * For MapReduce jobs running multiple mappers per region, determines what split algorithm we + * should be using to find split points for scanners. */ public static final String SPLIT_ALGO = "hbase.mapreduce.split.algorithm"; /** - * For MapReduce jobs running multiple mappers per region, determines - * number of splits to generate per region. + * For MapReduce jobs running multiple mappers per region, determines number of splits to generate + * per region. */ public static final String NUM_SPLITS_PER_REGION = "hbase.mapreduce.splits.per.region"; /** - * Whether to calculate the block location for splits. Default to true. - * If the computing layer runs outside of HBase cluster, the block locality does not master. - * Setting this value to false could skip the calculation and save some time. - * - * Set access modifier to "public" so that these could be accessed by test classes of - * both org.apache.hadoop.hbase.mapred - * and org.apache.hadoop.hbase.mapreduce. + * Whether to calculate the block location for splits. Default to true. If the computing layer + * runs outside of HBase cluster, the block locality does not master. Setting this value to false + * could skip the calculation and save some time. Set access modifier to "public" so that these + * could be accessed by test classes of both org.apache.hadoop.hbase.mapred and + * org.apache.hadoop.hbase.mapreduce. */ - public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY = + public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY = "hbase.TableSnapshotInputFormat.locality.enabled"; public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT = true; /** - * Whether to calculate the Snapshot region location by region location from meta. - * It is much faster than computing block locations for splits. + * Whether to calculate the Snapshot region location by region location from meta. It is much + * faster than computing block locations for splits. 
*/ - public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION = - "hbase.TableSnapshotInputFormat.locality.by.region.location"; + public static final String SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION = + "hbase.TableSnapshotInputFormat.locality.by.region.location"; public static final boolean SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT = false; @@ -125,8 +122,8 @@ public class TableSnapshotInputFormatImpl { /** * Whether to enable scan metrics on Scan, default to true */ - public static final String SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED = - "hbase.TableSnapshotInputFormat.scan_metrics.enabled"; + public static final String SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED = + "hbase.TableSnapshotInputFormat.scan_metrics.enabled"; public static final boolean SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT = true; @@ -150,7 +147,8 @@ public static class InputSplit implements Writable { private String restoreDir; // constructor for mapreduce framework / Writable - public InputSplit() {} + public InputSplit() { + } public InputSplit(TableDescriptor htd, HRegionInfo regionInfo, List locations, Scan scan, Path restoreDir) { @@ -183,7 +181,7 @@ public String getRestoreDir() { } public long getLength() { - //TODO: We can obtain the file sizes of the snapshot here. + // TODO: We can obtain the file sizes of the snapshot here. return 0; } @@ -204,8 +202,7 @@ public HRegionInfo getRegionInfo() { @Override public void write(DataOutput out) throws IOException { TableSnapshotRegionSplit.Builder builder = TableSnapshotRegionSplit.newBuilder() - .setTable(ProtobufUtil.toTableSchema(htd)) - .setRegion(HRegionInfo.convert(regionInfo)); + .setTable(ProtobufUtil.toTableSchema(htd)).setRegion(HRegionInfo.convert(regionInfo)); for (String location : locations) { builder.addLocations(location); @@ -265,7 +262,6 @@ public void initialize(InputSplit split, Configuration conf) throws IOException HRegionInfo hri = this.split.getRegionInfo(); FileSystem fs = CommonFSUtils.getCurrentFileSystem(conf); - // region is immutable, this should be fine, // otherwise we have to set the thread read point scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED); @@ -279,7 +275,7 @@ public void initialize(InputSplit split, Configuration conf) throws IOException public boolean nextKeyValue() throws IOException { result = scanner.next(); if (result == null) { - //we are done + // we are done return false; } @@ -346,13 +342,12 @@ public static RegionSplitter.SplitAlgorithm getSplitAlgo(Configuration conf) thr try { return Class.forName(splitAlgoClassName).asSubclass(RegionSplitter.SplitAlgorithm.class) .getDeclaredConstructor().newInstance(); - } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | - NoSuchMethodException | InvocationTargetException e) { + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException + | NoSuchMethodException | InvocationTargetException e) { throw new IOException("SplitAlgo class " + splitAlgoClassName + " is not found", e); } } - public static List getRegionInfosFromManifest(SnapshotManifest manifest) { List regionManifests = manifest.getRegionManifests(); if (regionManifests == null) { @@ -384,7 +379,7 @@ public static Scan extractScanFromConf(Configuration conf) throws IOException { scan = TableMapReduceUtil.convertStringToScan(conf.get(TableInputFormat.SCAN)); } else if (conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST) != null) { String[] columns = - 
conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" "); + conf.get(org.apache.hadoop.hbase.mapred.TableInputFormat.COLUMN_LIST).split(" "); scan = new Scan(); for (String col : columns) { scan.addFamily(Bytes.toBytes(col)); @@ -394,11 +389,11 @@ public static Scan extractScanFromConf(Configuration conf) throws IOException { } if (scan.getReadType() == ReadType.DEFAULT) { - LOG.info("Provided Scan has DEFAULT ReadType," - + " updating STREAM for Snapshot-based InputFormat"); + LOG.info( + "Provided Scan has DEFAULT ReadType," + " updating STREAM for Snapshot-based InputFormat"); // Update the "DEFAULT" ReadType to be "STREAM" to try to improve the default case. scan.setReadType(conf.getEnum(SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE, - SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); + SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE_DEFAULT)); } return scan; @@ -410,15 +405,15 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, } public static List getSplits(Scan scan, SnapshotManifest manifest, - List regionManifests, Path restoreDir, - Configuration conf, RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { + List regionManifests, Path restoreDir, Configuration conf, + RegionSplitter.SplitAlgorithm sa, int numSplits) throws IOException { // load table descriptor TableDescriptor htd = manifest.getTableDescriptor(); Path tableDir = CommonFSUtils.getTableDir(restoreDir, htd.getTableName()); boolean localityEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean scanMetricsEnabled = conf.getBoolean(SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED, SNAPSHOT_INPUTFORMAT_SCAN_METRICS_ENABLED_DEFAULT); @@ -452,8 +447,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, if (localityEnabled) { if (regionLocator != null) { /* Get Location from the local cache */ - HRegionLocation - location = regionLocator.getRegionLocation(hri.getStartKey(), false); + HRegionLocation location = regionLocator.getRegionLocation(hri.getStartKey(), false); hosts = new ArrayList<>(1); hosts.add(location.getHostname()); @@ -466,7 +460,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, byte[][] sp = sa.split(hri.getStartKey(), hri.getEndKey(), numSplits, true); for (int i = 0; i < sp.length - 1; i++) { if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), sp[i], - sp[i + 1])) { + sp[i + 1])) { Scan boundedScan = new Scan(scan); if (scan.getStartRow().length == 0) { @@ -488,7 +482,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, } } else { if (PrivateCellUtil.overlappingKeys(scan.getStartRow(), scan.getStopRow(), - hri.getStartKey(), hri.getEndKey())) { + hri.getStartKey(), hri.getEndKey())) { splits.add(new InputSplit(htd, hri, hosts, scan, restoreDir)); } @@ -503,8 +497,7 @@ public static List getSplits(Scan scan, SnapshotManifest manifest, * only when localityEnabled is true. 
*/ private static List calculateLocationsForInputSplit(Configuration conf, - TableDescriptor htd, HRegionInfo hri, Path tableDir) - throws IOException { + TableDescriptor htd, HRegionInfo hri, Path tableDir) throws IOException { return getBestLocations(conf, HRegion.computeHDFSBlocksDistribution(conf, htd, hri, tableDir)); } @@ -514,12 +507,11 @@ private static List calculateLocationsForInputSplit(Configuration conf, * do not want to blindly pass all the locations, since we are creating one split per region, and * the region's blocks are all distributed throughout the cluster unless favorite node assignment * is used. On the expected stable case, only one location will contain most of the blocks as - * local. - * On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. Here - * we are doing a simple heuristic, where we will pass all hosts which have at least 80% + * local. On the other hand, in favored node assignment, 3 nodes will contain highly local blocks. + * Here we are doing a simple heuristic, where we will pass all hosts which have at least 80% * (hbase.tablesnapshotinputformat.locality.cutoff.multiplier) as much block locality as the top - * host with the best locality. - * Return at most numTopsAtMost locations if there are more than that. + * host with the best locality. Return at most numTopsAtMost locations if there are more than + * that. */ private static List getBestLocations(Configuration conf, HDFSBlocksDistribution blockDistribution, int numTopsAtMost) { @@ -543,8 +535,8 @@ private static List getBestLocations(Configuration conf, // When top >= 2, // do the heuristic: filter all hosts which have at least cutoffMultiplier % of block locality - double cutoffMultiplier - = conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); + double cutoffMultiplier = + conf.getFloat(LOCALITY_CUTOFF_MULTIPLIER, DEFAULT_LOCALITY_CUTOFF_MULTIPLIER); double filterWeight = topHost.getWeight() * cutoffMultiplier; @@ -601,12 +593,11 @@ public static void setInput(Configuration conf, String snapshotName, Path restor * @throws IOException if an error occurs */ public static void setInput(Configuration conf, String snapshotName, Path restoreDir, - RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) - throws IOException { + RegionSplitter.SplitAlgorithm splitAlgo, int numSplitsPerRegion) throws IOException { conf.set(SNAPSHOT_NAME_KEY, snapshotName); if (numSplitsPerRegion < 1) { - throw new IllegalArgumentException("numSplits must be >= 1, " + - "illegal numSplits : " + numSplitsPerRegion); + throw new IllegalArgumentException( + "numSplits must be >= 1, " + "illegal numSplits : " + numSplitsPerRegion); } if (splitAlgo == null && numSplitsPerRegion > 1) { throw new IllegalArgumentException("Split algo can't be null when numSplits > 1"); @@ -625,7 +616,7 @@ public static void setInput(Configuration conf, String snapshotName, Path restor } /** - * clean restore directory after snapshot scan job + * clean restore directory after snapshot scan job * @param job the snapshot scan job * @param snapshotName the name of the snapshot to read from * @throws IOException if an error occurs @@ -641,6 +632,6 @@ public static void cleanRestoreDir(Job job, String snapshotName) throws IOExcept if (!fs.delete(restoreDir, true)) { LOG.warn("Failed clean restore dir {} for snapshot {}", restoreDir, snapshotName); } - LOG.debug("Clean restore directory {} for {}", restoreDir, snapshotName); + LOG.debug("Clean restore directory {} for {}", 
restoreDir, snapshotName); } } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java index 93300ebb0f39..939fc5811eb3 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableSplit.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,12 +33,11 @@ import org.slf4j.LoggerFactory; /** - * A table split corresponds to a key range (low, high) and an optional scanner. - * All references to row below refer to the key of the row. + * A table split corresponds to a key range (low, high) and an optional scanner. All references to + * row below refer to the key of the row. */ @InterfaceAudience.Public -public class TableSplit extends InputSplit - implements Writable, Comparable { +public class TableSplit extends InputSplit implements Writable, Comparable { /** @deprecated LOG variable would be made private. fix in hbase 3.0 */ @Deprecated public static final Logger LOG = LoggerFactory.getLogger(TableSplit.class); @@ -79,76 +77,68 @@ static Version fromCode(int code) { private static final Version VERSION = Version.WITH_ENCODED_REGION_NAME; private TableName tableName; - private byte [] startRow; - private byte [] endRow; + private byte[] startRow; + private byte[] endRow; private String regionLocation; private String encodedRegionName = ""; /** - * The scan object may be null but the serialized form of scan is never null - * or empty since we serialize the scan object with default values then. - * Having no scanner in TableSplit doesn't necessarily mean there is no scanner - * for mapreduce job, it just means that we do not need to set it for each split. - * For example, it is not required to have a scan object for - * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the - * job conf and scanner is supposed to be same for all the splits of table. + * The scan object may be null but the serialized form of scan is never null or empty since we + * serialize the scan object with default values then. Having no scanner in TableSplit doesn't + * necessarily mean there is no scanner for mapreduce job, it just means that we do not need to + * set it for each split. For example, it is not required to have a scan object for + * {@link org.apache.hadoop.hbase.mapred.TableInputFormatBase} since we use the scan from the job + * conf and scanner is supposed to be same for all the splits of table. */ private String scan = ""; // stores the serialized form of the Scan private long length; // Contains estimation of region size in bytes /** Default constructor. */ public TableSplit() { - this((TableName)null, null, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, ""); + this((TableName) null, null, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, ""); } /** - * Creates a new instance while assigning all variables. - * Length of region is set to 0 - * Encoded name of the region is set to blank - * - * @param tableName The name of the current table. + * Creates a new instance while assigning all variables. Length of region is set to 0 Encoded name + * of the region is set to blank + * @param tableName The name of the current table. 
* @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, - final String location) { + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, + final String location) { this(tableName, scan, startRow, endRow, location, 0L); } /** - * Creates a new instance while assigning all variables. - * Encoded name of region is set to blank - * - * @param tableName The name of the current table. + * Creates a new instance while assigning all variables. Encoded name of region is set to blank + * @param tableName The name of the current table. * @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. - * @param location The location of the region. + * @param startRow The start row of the split. + * @param endRow The end row of the split. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, final String location, long length) { this(tableName, scan, startRow, endRow, location, "", length); } /** * Creates a new instance while assigning all variables. - * - * @param tableName The name of the current table. + * @param tableName The name of the current table. * @param scan The scan associated with this split. - * @param startRow The start row of the split. - * @param endRow The end row of the split. + * @param startRow The start row of the split. + * @param endRow The end row of the split. * @param encodedRegionName The region ID. - * @param location The location of the region. + * @param location The location of the region. */ - public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow, + public TableSplit(TableName tableName, Scan scan, byte[] startRow, byte[] endRow, final String location, final String encodedRegionName, long length) { this.tableName = tableName; try { - this.scan = - (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); + this.scan = (null == scan) ? "" : TableMapReduceUtil.convertScanToString(scan); } catch (IOException e) { LOG.warn("Failed to convert Scan to String", e); } @@ -160,36 +150,31 @@ public TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endR } /** - * Creates a new instance without a scanner. - * Length of region is set to 0 - * + * Creates a new instance without a scanner. Length of region is set to 0 * @param tableName The name of the current table. * @param startRow The start row of the split. * @param endRow The end row of the split. * @param location The location of the region. */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location) { this(tableName, null, startRow, endRow, location); } /** * Creates a new instance without a scanner. - * * @param tableName The name of the current table. * @param startRow The start row of the split. * @param endRow The end row of the split. * @param location The location of the region. 
* @param length Size of region in bytes */ - public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, - final String location, long length) { + public TableSplit(TableName tableName, byte[] startRow, byte[] endRow, final String location, + long length) { this(tableName, null, startRow, endRow, location, length); } /** * Returns a Scan object from the stored string representation. - * * @return Returns a Scan object based on the stored scanner. * @throws IOException throws IOException if deserialization fails */ @@ -199,9 +184,9 @@ public Scan getScan() throws IOException { /** * Returns a scan string - * @return scan as string. Should be noted that this is not same as getScan().toString() - * because Scan object will have the default values when empty scan string is - * deserialized. Thus, getScan().toString() can never be empty + * @return scan as string. Should be noted that this is not same as getScan().toString() because + * Scan object will have the default values when empty scan string is deserialized. Thus, + * getScan().toString() can never be empty */ @InterfaceAudience.Private public String getScanAsString() { @@ -213,17 +198,16 @@ public String getScanAsString() { * @see #getTable() * @return The table name. */ - public byte [] getTableName() { + public byte[] getTableName() { return tableName.getName(); } /** * Returns the table name. - * * @return The table name. */ public TableName getTable() { - // It is ugly that usually to get a TableName, the method is called getTableName. We can't do + // It is ugly that usually to get a TableName, the method is called getTableName. We can't do // that in here though because there was an existing getTableName in place already since // deprecated. return tableName; @@ -231,25 +215,22 @@ public TableName getTable() { /** * Returns the start row. - * * @return The start row. */ - public byte [] getStartRow() { + public byte[] getStartRow() { return startRow; } /** * Returns the end row. - * * @return The end row. */ - public byte [] getEndRow() { + public byte[] getEndRow() { return endRow; } /** * Returns the region location. - * * @return The region's location. */ public String getRegionLocation() { @@ -258,18 +239,16 @@ public String getRegionLocation() { /** * Returns the region's location as an array. - * * @return The array containing the region location. * @see org.apache.hadoop.mapreduce.InputSplit#getLocations() */ @Override public String[] getLocations() { - return new String[] {regionLocation}; + return new String[] { regionLocation }; } /** * Returns the region's encoded name. - * * @return The region's encoded name. */ public String getEncodedRegionName() { @@ -278,7 +257,6 @@ public String getEncodedRegionName() { /** * Returns the length of the split. - * * @return The length of the split. * @see org.apache.hadoop.mapreduce.InputSplit#getLength() */ @@ -289,8 +267,7 @@ public long getLength() { /** * Reads the values of each field. - * - * @param in The input to read from. + * @param in The input to read from. * @throws IOException When reading the input fails. */ @Override @@ -327,8 +304,7 @@ public void readFields(DataInput in) throws IOException { /** * Writes the field values to the output. - * - * @param out The output to write to. + * @param out The output to write to. * @throws IOException When writing the values to the output fails. */ @Override @@ -345,7 +321,6 @@ public void write(DataOutput out) throws IOException { /** * Returns the details about this instance as a string. 
- * * @return The values of this instance as a string. * @see java.lang.Object#toString() */ @@ -360,8 +335,7 @@ public String toString() { try { // get the real scan here in toString, not the Base64 string printScan = TableMapReduceUtil.convertStringToScan(scan).toString(); - } - catch (IOException e) { + } catch (IOException e) { printScan = ""; } sb.append(", scan=").append(printScan); @@ -376,8 +350,7 @@ public String toString() { /** * Compares this split against the given one. - * - * @param split The split to compare to. + * @param split The split to compare to. * @return The result of the comparison. * @see java.lang.Comparable#compareTo(java.lang.Object) */ @@ -385,10 +358,9 @@ public String toString() { public int compareTo(TableSplit split) { // If The table name of the two splits is the same then compare start row // otherwise compare based on table names - int tableNameComparison = - getTable().compareTo(split.getTable()); - return tableNameComparison != 0 ? tableNameComparison : Bytes.compareTo( - getStartRow(), split.getStartRow()); + int tableNameComparison = getTable().compareTo(split.getTable()); + return tableNameComparison != 0 ? tableNameComparison + : Bytes.compareTo(getStartRow(), split.getStartRow()); } @Override @@ -396,10 +368,10 @@ public boolean equals(Object o) { if (o == null || !(o instanceof TableSplit)) { return false; } - return tableName.equals(((TableSplit)o).tableName) && - Bytes.equals(startRow, ((TableSplit)o).startRow) && - Bytes.equals(endRow, ((TableSplit)o).endRow) && - regionLocation.equals(((TableSplit)o).regionLocation); + return tableName.equals(((TableSplit) o).tableName) + && Bytes.equals(startRow, ((TableSplit) o).startRow) + && Bytes.equals(endRow, ((TableSplit) o).endRow) + && regionLocation.equals(((TableSplit) o).regionLocation); } @Override diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java index 667ca97e3f1b..4376bee85f91 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TextSortReducer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Set; import java.util.TreeSet; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; @@ -33,7 +32,6 @@ import org.apache.hadoop.hbase.KeyValueUtil; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.security.visibility.InvalidLabelException; import org.apache.hadoop.hbase.util.Bytes; @@ -41,6 +39,7 @@ import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Reducer; import org.apache.hadoop.util.StringUtils; +import org.apache.yetus.audience.InterfaceAudience; /** * Emits Sorted KeyValues. Parse the passed text and creates KeyValues. Sorts them before emit. 
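Editor's sketch (illustrative, not part of the patch): the TableSplit hunks above reformat the constructors, the Writable round-trip, and compareTo; the short example below exercises exactly those members as they appear in the hunks. The table name, row keys, region location, and length are made up for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.mapreduce.TableSplit;
import org.apache.hadoop.hbase.util.Bytes;

public class TableSplitRoundTrip {
  public static void main(String[] args) throws Exception {
    // Construct a split without a scanner; the region length is passed explicitly.
    TableSplit split = new TableSplit(TableName.valueOf("demo_table"), Bytes.toBytes("aaa"),
      Bytes.toBytes("mmm"), "rs1.example.com", 64L * 1024 * 1024);

    // Serialize with write(DataOutput) and read it back with readFields(DataInput).
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    split.write(new DataOutputStream(bos));
    TableSplit copy = new TableSplit();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bos.toByteArray())));

    // compareTo orders by table name first, then by start row, as shown in the hunk above.
    System.out.println(split.compareTo(copy));             // expected: 0
    System.out.println(copy.getRegionLocation());          // rs1.example.com
    System.out.println(Bytes.toString(copy.getEndRow()));  // mmm
  }
}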
@@ -49,8 +48,8 @@ * @see PutSortReducer */ @InterfaceAudience.Public -public class TextSortReducer extends - Reducer { +public class TextSortReducer + extends Reducer { /** Timestamp for all inserted rows */ private long ts; @@ -90,11 +89,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. * @param context */ @Override @@ -132,16 +130,11 @@ protected void doSetup(Context context, Configuration conf) { } @Override - protected void reduce( - ImmutableBytesWritable rowKey, - java.lang.Iterable lines, - Reducer.Context context) - throws java.io.IOException, InterruptedException - { + protected void reduce(ImmutableBytesWritable rowKey, java.lang.Iterable lines, + Reducer.Context context) + throws java.io.IOException, InterruptedException { // although reduce() is called per-row, handle pathological case - long threshold = context.getConfiguration().getLong( - "reducer.row.threshold", 1L * (1<<30)); + long threshold = context.getConfiguration().getLong("reducer.row.threshold", 1L * (1 << 30)); Iterator iter = lines.iterator(); while (iter.hasNext()) { Set kvs = new TreeSet<>(CellComparator.getInstance()); @@ -160,8 +153,8 @@ protected void reduce( // create tags for the parsed line List tags = new ArrayList<>(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -170,16 +163,17 @@ protected void reduce( } for (int i = 0; i < parsed.getColumnCount(); i++) { if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex()) { continue; } // Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. 
Cell cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), - parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, - parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, - parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); + parsed.getRowKeyLength(), parser.getFamily(i), 0, parser.getFamily(i).length, + parser.getQualifier(i), 0, parser.getQualifier(i).length, ts, lineBytes, + parsed.getColumnOffset(i), parsed.getColumnLength(i), tags); KeyValue kv = KeyValueUtil.ensureKeyValue(cell); kvs.add(kv); curSize += kv.heapSize(); @@ -194,13 +188,12 @@ protected void reduce( throw new IOException(badLine); } } - context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() - + "(" + StringUtils.humanReadableInt(curSize) + ")"); + context.setStatus("Read " + kvs.size() + " entries of " + kvs.getClass() + "(" + + StringUtils.humanReadableInt(curSize) + ")"); int index = 0; for (KeyValue kv : kvs) { context.write(rowKey, kv); - if (++index > 0 && index % 100 == 0) - context.setStatus("Wrote " + index + " key values."); + if (++index > 0 && index % 100 == 0) context.setStatus("Wrote " + index + " key values."); } // if we have more entries to process diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java index 8dc7156d099a..80937fde9fcb 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,14 +21,12 @@ import java.util.ArrayList; import java.util.Base64; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.ArrayBackedTag; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.Tag; import org.apache.hadoop.hbase.TagType; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -39,14 +37,13 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; import org.apache.hadoop.mapreduce.Mapper; +import org.apache.yetus.audience.InterfaceAudience; /** * Write table content out to files in hdfs. */ @InterfaceAudience.Public -public class TsvImporterMapper -extends Mapper -{ +public class TsvImporterMapper extends Mapper { /** Timestamp for all inserted rows */ protected long ts; @@ -56,7 +53,7 @@ public class TsvImporterMapper /** Should skip bad lines */ private boolean skipBadLines; - /** Should skip empty columns*/ + /** Should skip empty columns */ private boolean skipEmptyColumns; private Counter badLineCount; private boolean logBadLines; @@ -93,11 +90,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subsclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. 
- * + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subsclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. * @param context */ @Override @@ -105,8 +101,7 @@ protected void setup(Context context) { doSetup(context); conf = context.getConfiguration(); - parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), - separator); + parser = new ImportTsv.TsvParser(conf.get(ImportTsv.COLUMNS_CONF_KEY), separator); if (parser.getRowKeyColumnIndex() == -1) { throw new RuntimeException("No row key column specified"); } @@ -133,10 +128,8 @@ protected void doSetup(Context context) { // configuration. ts = conf.getLong(ImportTsv.TIMESTAMP_CONF_KEY, 0); - skipEmptyColumns = context.getConfiguration().getBoolean( - ImportTsv.SKIP_EMPTY_COLUMNS, false); - skipBadLines = context.getConfiguration().getBoolean( - ImportTsv.SKIP_LINES_CONF_KEY, true); + skipEmptyColumns = context.getConfiguration().getBoolean(ImportTsv.SKIP_EMPTY_COLUMNS, false); + skipBadLines = context.getConfiguration().getBoolean(ImportTsv.SKIP_LINES_CONF_KEY, true); badLineCount = context.getCounter("ImportTsv", "Bad Lines"); logBadLines = context.getConfiguration().getBoolean(ImportTsv.LOG_BAD_LINES_CONF_KEY, false); hfileOutPath = conf.get(ImportTsv.BULK_OUTPUT_CONF_KEY); @@ -146,18 +139,13 @@ protected void doSetup(Context context) { * Convert a line of TSV text into an HBase table row. */ @Override - public void map(LongWritable offset, Text value, - Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] lineBytes = value.getBytes(); try { - ImportTsv.TsvParser.ParsedLine parsed = parser.parse( - lineBytes, value.getLength()); + ImportTsv.TsvParser.ParsedLine parsed = parser.parse(lineBytes, value.getLength()); ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(lineBytes, - parsed.getRowKeyOffset(), - parsed.getRowKeyLength()); + new ImmutableBytesWritable(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength()); // Retrieve timestamp if exists ts = parsed.getTimestamp(ts); cellVisibilityExpr = parsed.getCellVisibility(); @@ -167,8 +155,8 @@ public void map(LongWritable offset, Text value, if (hfileOutPath != null) { tags.clear(); if (cellVisibilityExpr != null) { - tags.addAll(kvCreator.getVisibilityExpressionResolver().createVisibilityExpTags( - cellVisibilityExpr)); + tags.addAll(kvCreator.getVisibilityExpressionResolver() + .createVisibilityExpTags(cellVisibilityExpr)); } // Add TTL directly to the KV so we can vary them when packing more than one KV // into puts @@ -179,9 +167,9 @@ public void map(LongWritable offset, Text value, Put put = new Put(rowKey.copyBytes()); for (int i = 0; i < parsed.getColumnCount(); i++) { if (i == parser.getRowKeyColumnIndex() || i == parser.getTimestampKeyColumnIndex() - || i == parser.getAttributesKeyColumnIndex() || i == parser.getCellVisibilityColumnIndex() - || i == parser.getCellTTLColumnIndex() || (skipEmptyColumns - && parsed.getColumnLength(i) == 0)) { + || i == parser.getAttributesKeyColumnIndex() + || i == parser.getCellVisibilityColumnIndex() || i == parser.getCellTTLColumnIndex() + || (skipEmptyColumns && parsed.getColumnLength(i) == 0)) { continue; } populatePut(lineBytes, parsed, put, i); @@ -223,9 +211,9 @@ protected void populatePut(byte[] lineBytes, ImportTsv.TsvParser.ParsedLine pars // 
Creating the KV which needs to be directly written to HFiles. Using the Facade // KVCreator for creation of kvs. cell = this.kvCreator.create(lineBytes, parsed.getRowKeyOffset(), parsed.getRowKeyLength(), - parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, - parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), - parsed.getColumnLength(i), tags); + parser.getFamily(i), 0, parser.getFamily(i).length, parser.getQualifier(i), 0, + parser.getQualifier(i).length, ts, lineBytes, parsed.getColumnOffset(i), + parsed.getColumnLength(i), tags); } put.add(cell); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java index f3f81ec1a717..de5142cdc00f 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TsvImporterTextMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,23 +19,21 @@ import java.io.IOException; import java.util.Base64; - -import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.Text; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Pair; -import org.apache.hadoop.mapreduce.Mapper; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.Counter; +import org.apache.hadoop.mapreduce.Mapper; import org.apache.yetus.audience.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; /** * Write table content out to map output files. */ @InterfaceAudience.Public public class TsvImporterTextMapper -extends Mapper -{ + extends Mapper { /** Column seperator */ private String separator; @@ -60,11 +58,10 @@ public void incrementBadLineCount(int count) { } /** - * Handles initializing this class with objects specific to it (i.e., the parser). - * Common initialization that might be leveraged by a subclass is done in - * doSetup. Hence a subclass may choose to override this method - * and call doSetup as well before handling it's own custom params. - * + * Handles initializing this class with objects specific to it (i.e., the parser). Common + * initialization that might be leveraged by a subclass is done in doSetup. Hence a + * subclass may choose to override this method and call doSetup as well before + * handling it's own custom params. 
* @param context */ @Override @@ -106,11 +103,12 @@ protected void doSetup(Context context) { @Override public void map(LongWritable offset, Text value, Context context) throws IOException { try { - Pair rowKeyOffests = parser.parseRowKey(value.getBytes(), value.getLength()); - ImmutableBytesWritable rowKey = new ImmutableBytesWritable( - value.getBytes(), rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); + Pair rowKeyOffests = + parser.parseRowKey(value.getBytes(), value.getLength()); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(value.getBytes(), + rowKeyOffests.getFirst(), rowKeyOffests.getSecond()); context.write(rowKey, value); - } catch (ImportTsv.TsvParser.BadTsvLineException|IllegalArgumentException badLine) { + } catch (ImportTsv.TsvParser.BadTsvLineException | IllegalArgumentException badLine) { if (logBadLines) { System.err.println(value); } diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java index f0f4c82a5ad8..b42c0d9116d2 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/VisibilityExpressionResolver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,10 +19,9 @@ import java.io.IOException; import java.util.List; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hbase.Tag; +import org.apache.yetus.audience.InterfaceAudience; /** * Interface to convert visibility expressions into Tags for storing along with Cells in HFiles. diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java index ffc202ab0dc7..d47344afb516 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALInputFormat.java @@ -61,8 +61,7 @@ public class WALInputFormat extends InputFormat { public static final String END_TIME_KEY = "wal.end.time"; /** - * {@link InputSplit} for {@link WAL} files. Each split represent - * exactly one log file. + * {@link InputSplit} for {@link WAL} files. Each split represent exactly one log file. */ static class WALSplit extends InputSplit implements Writable { private String logFileName; @@ -71,12 +70,12 @@ static class WALSplit extends InputSplit implements Writable { private long endTime; /** for serialization */ - public WALSplit() {} + public WALSplit() { + } /** - * Represent an WALSplit, i.e. a single WAL file. - * Start- and EndTime are managed by the split, so that WAL files can be - * filtered before WALEdits are passed to the mapper(s). + * Represent an WALSplit, i.e. a single WAL file. Start- and EndTime are managed by the split, + * so that WAL files can be filtered before WALEdits are passed to the mapper(s). */ public WALSplit(String logFileName, long fileSize, long startTime, long endTime) { this.logFileName = logFileName; @@ -131,8 +130,8 @@ public String toString() { } /** - * {@link RecordReader} for an {@link WAL} file. - * Implementation shared with deprecated HLogInputFormat. 
+ * {@link RecordReader} for an {@link WAL} file. Implementation shared with deprecated + * HLogInputFormat. */ static abstract class WALRecordReader extends RecordReader { private Reader reader = null; @@ -147,7 +146,7 @@ static abstract class WALRecordReader extends RecordReader { @Override @@ -261,8 +260,7 @@ public WALKey getCurrentKey() throws IOException, InterruptedException { } @Override - public List getSplits(JobContext context) throws IOException, - InterruptedException { + public List getSplits(JobContext context) throws IOException, InterruptedException { return getSplits(context, START_TIME_KEY, END_TIME_KEY); } @@ -278,14 +276,14 @@ List getSplits(final JobContext context, final String startKey, fina long endTime = conf.getLong(endKey, Long.MAX_VALUE); List allFiles = new ArrayList(); - for(Path inputPath: inputPaths){ + for (Path inputPath : inputPaths) { FileSystem fs = inputPath.getFileSystem(conf); try { List files = getFiles(fs, inputPath, startTime, endTime); allFiles.addAll(files); } catch (FileNotFoundException e) { if (ignoreMissing) { - LOG.warn("File "+ inputPath +" is missing. Skipping it."); + LOG.warn("File " + inputPath + " is missing. Skipping it."); continue; } throw e; @@ -300,17 +298,17 @@ List getSplits(final JobContext context, final String startKey, fina private Path[] getInputPaths(Configuration conf) { String inpDirs = conf.get(FileInputFormat.INPUT_DIR); - return StringUtils.stringToPath( - inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); + return StringUtils + .stringToPath(inpDirs.split(conf.get(WALPlayer.INPUT_FILES_SEPARATOR_KEY, ","))); } /** - * @param startTime If file looks like it has a timestamp in its name, we'll check if newer - * or equal to this value else we will filter out the file. If name does not - * seem to have a timestamp, we will just return it w/o filtering. + * @param startTime If file looks like it has a timestamp in its name, we'll check if newer or + * equal to this value else we will filter out the file. If name does not seem to have a + * timestamp, we will just return it w/o filtering. * @param endTime If file looks like it has a timestamp in its name, we'll check if older or equal - * to this value else we will filter out the file. If name does not seem to - * have a timestamp, we will just return it w/o filtering. + * to this value else we will filter out the file. If name does not seem to have a + * timestamp, we will just return it w/o filtering. */ private List getFiles(FileSystem fs, Path dir, long startTime, long endTime) throws IOException { @@ -344,8 +342,8 @@ static void addFile(List result, LocatedFileStatus lfs, long startTi LOG.info("Found {}", lfs.getPath()); result.add(lfs); } else { - LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), - startTime, Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); + LOG.info("Skipped {}, outside range [{}/{} - {}/{}]", lfs.getPath(), startTime, + Instant.ofEpochMilli(startTime), endTime, Instant.ofEpochMilli(endTime)); } } else { // If no timestamp, add it regardless. 
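Editor's sketch (illustrative, not part of the patch): before the WALPlayer hunks below, a minimal driver showing how the WALInputFormat reformatted above is typically wired into a job, with one split per WAL file and splits filtered by the start/end time keys. The input path and timestamps are invented, the CountMapper is a stand-in mapper, and the WALKey/WALEdit import locations are assumed from the branch-2 package layout.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.WALInputFormat;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class WalEditCounter {

  // Counts WALEdits per table; each input split corresponds to exactly one WAL file.
  static class CountMapper extends Mapper<WALKey, WALEdit, NullWritable, NullWritable> {
    @Override
    protected void map(WALKey key, WALEdit value, Context context)
        throws IOException, InterruptedException {
      context.getCounter("wal", key.getTableName().getNameAsString()).increment(1);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // WAL files whose name timestamps fall outside this window are skipped when listing splits.
    conf.setLong(WALInputFormat.START_TIME_KEY, 1640995200000L); // 2022-01-01T00:00:00Z
    conf.setLong(WALInputFormat.END_TIME_KEY, 1643673600000L);   // 2022-02-01T00:00:00Z
    conf.set(FileInputFormat.INPUT_DIR, "/hbase/oldWALs");       // illustrative WAL directory

    Job job = Job.getInstance(conf, "wal-edit-counter");
    job.setJarByClass(WalEditCounter.class);
    job.setInputFormatClass(WALInputFormat.class);
    job.setMapperClass(CountMapper.class);
    job.setOutputFormatClass(NullOutputFormat.class);
    job.setNumReduceTasks(0);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}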
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java index a6fef42b1c3c..8d1efdebfcdf 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/WALPlayer.java @@ -55,17 +55,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - - /** - * A tool to replay WAL files as a M/R job. - * The WAL can be replayed for a set of tables or all tables, - * and a time range can be provided (in milliseconds). - * The WAL is filtered to the passed set of tables and the output - * can optionally be mapped to another set of tables. - * - * WAL replay can also generate HFiles for later bulk importing, - * in that case the WAL is replayed for a single table only. + * A tool to replay WAL files as a M/R job. The WAL can be replayed for a set of tables or all + * tables, and a time range can be provided (in milliseconds). The WAL is filtered to the passed set + * of tables and the output can optionally be mapped to another set of tables. WAL replay can also + * generate HFiles for later bulk importing, in that case the WAL is replayed for a single table + * only. */ @InterfaceAudience.Public public class WALPlayer extends Configured implements Tool { @@ -77,10 +72,9 @@ public class WALPlayer extends Configured implements Tool { public final static String INPUT_FILES_SEPARATOR_KEY = "wal.input.separator"; public final static String IGNORE_MISSING_FILES = "wal.input.ignore.missing.files"; - private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; - public WALPlayer(){ + public WALPlayer() { } protected WALPlayer(final Configuration c) { @@ -88,19 +82,16 @@ protected WALPlayer(final Configuration c) { } /** - * A mapper that just writes out KeyValues. - * This one can be used together with {@link KeyValueSortReducer} - * @deprecated Use {@link WALCellMapper}. Will be removed from 3.0 onwards + * A mapper that just writes out KeyValues. This one can be used together with + * {@link KeyValueSortReducer} + * @deprecated Use {@link WALCellMapper}. Will be removed from 3.0 onwards */ @Deprecated - static class WALKeyValueMapper - extends Mapper { + static class WALKeyValueMapper extends Mapper { private byte[] table; @Override - public void map(WALKey key, WALEdit value, - Context context) - throws IOException { + public void map(WALKey key, WALEdit value, Context context) throws IOException { try { // skip all other tables if (Bytes.equals(table, key.getTableName().getName())) { @@ -130,18 +121,15 @@ public void setup(Context context) throws IOException { } } + /** - * A mapper that just writes out Cells. - * This one can be used together with {@link CellSortReducer} + * A mapper that just writes out Cells. This one can be used together with {@link CellSortReducer} */ - static class WALCellMapper - extends Mapper { + static class WALCellMapper extends Mapper { private byte[] table; @Override - public void map(WALKey key, WALEdit value, - Context context) - throws IOException { + public void map(WALKey key, WALEdit value, Context context) throws IOException { try { // skip all other tables if (Bytes.equals(table, key.getTableName().getName())) { @@ -173,36 +161,30 @@ public void setup(Context context) throws IOException { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. 
+ * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. */ protected static enum Counter { /** Number of aggregated writes */ PUTS, /** Number of aggregated deletes */ - DELETES, - CELLS_READ, - CELLS_WRITTEN, - WALEDITS + DELETES, CELLS_READ, CELLS_WRITTEN, WALEDITS } /** - * A mapper that writes out {@link Mutation} to be directly applied to - * a running HBase instance. + * A mapper that writes out {@link Mutation} to be directly applied to a running HBase instance. */ protected static class WALMapper - extends Mapper { + extends Mapper { private Map tables = new TreeMap<>(); @Override - public void map(WALKey key, WALEdit value, Context context) - throws IOException { + public void map(WALKey key, WALEdit value, Context context) throws IOException { context.getCounter(Counter.WALEDITS).increment(1); try { if (tables.isEmpty() || tables.containsKey(key.getTableName())) { - TableName targetTable = tables.isEmpty() ? - key.getTableName() : - tables.get(key.getTableName()); + TableName targetTable = + tables.isEmpty() ? key.getTableName() : tables.get(key.getTableName()); ImmutableBytesWritable tableOut = new ImmutableBytesWritable(targetTable.getName()); Put put = null; Delete del = null; @@ -288,8 +270,7 @@ public void setup(Context context) throws IOException { int i = 0; if (tablesToUse != null) { for (String table : tablesToUse) { - tables.put(TableName.valueOf(table), - TableName.valueOf(tableMap[i++])); + tables.put(TableName.valueOf(table), TableName.valueOf(tableMap[i++])); } } } @@ -309,9 +290,9 @@ void setupTime(Configuration conf, String option) throws IOException { // then see if just a number of ms's was specified ms = Long.parseLong(val); } catch (NumberFormatException nfe) { - throw new IOException(option - + " must be specified either in the form 2001-02-20T16:35:06.99 " - + "or as number of milliseconds"); + throw new IOException( + option + " must be specified either in the form 2001-02-20T16:35:06.99 " + + "or as number of milliseconds"); } } conf.setLong(option, ms); @@ -319,8 +300,7 @@ void setupTime(Configuration conf, String option) throws IOException { /** * Sets up the actual job. - * - * @param args The command line parameters. + * @param args The command line parameters. * @return The newly created job. * @throws IOException When setting up the job fails. */ @@ -329,7 +309,7 @@ public Job createSubmittableJob(String[] args) throws IOException { setupTime(conf, WALInputFormat.START_TIME_KEY); setupTime(conf, WALInputFormat.END_TIME_KEY); String inputDirs = args[0]; - String[] tables = args.length == 1? new String [] {}: args[1].split(","); + String[] tables = args.length == 1 ? 
new String[] {} : args[1].split(","); String[] tableMap; if (args.length > 2) { tableMap = args[2].split(","); @@ -343,8 +323,8 @@ public Job createSubmittableJob(String[] args) throws IOException { conf.setStrings(TABLES_KEY, tables); conf.setStrings(TABLE_MAP_KEY, tableMap); conf.set(FileInputFormat.INPUT_DIR, inputDirs); - Job job = Job.getInstance(conf, conf.get(JOB_NAME_CONF_KEY, NAME + "_" + - EnvironmentEdgeManager.currentTime())); + Job job = Job.getInstance(conf, + conf.get(JOB_NAME_CONF_KEY, NAME + "_" + EnvironmentEdgeManager.currentTime())); job.setJarByClass(WALPlayer.class); job.setInputFormatClass(WALInputFormat.class); @@ -370,7 +350,7 @@ public Job createSubmittableJob(String[] args) throws IOException { HFileOutputFormat2.configureIncrementalLoad(job, table.getDescriptor(), regionLocator); } TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); + org.apache.hbase.thirdparty.com.google.common.base.Preconditions.class); } else { // output to live cluster job.setMapperClass(WALMapper.class); @@ -382,17 +362,17 @@ public Job createSubmittableJob(String[] args) throws IOException { } String codecCls = WALCellCodec.getWALCellCodecClass(conf).getName(); try { - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Class.forName(codecCls)); + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), + Class.forName(codecCls)); } catch (Exception e) { throw new IOException("Cannot determine wal codec class " + codecCls, e); } return job; } - /** * Print usage - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private void usage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { @@ -402,12 +382,12 @@ private void usage(final String errorMsg) { System.err.println(" directory of WALs to replay."); System.err.println(" comma separated list of tables. If no tables specified,"); System.err.println(" all are imported (even hbase:meta if present)."); - System.err.println(" WAL entries can be mapped to a new set of tables by " + - "passing"); - System.err.println(" , a comma separated list of target " + - "tables."); - System.err.println(" If specified, each table in must have a " + - "mapping."); + System.err.println( + " WAL entries can be mapped to a new set of tables by " + "passing"); + System.err.println( + " , a comma separated list of target " + "tables."); + System.err.println( + " If specified, each table in must have a " + "mapping."); System.err.println("To generate HFiles to bulk load instead of loading HBase directly, pass:"); System.err.println(" -D" + BULK_OUTPUT_CONF_KEY + "=/path/for/output"); System.err.println(" Only one table can be specified, and no mapping allowed!"); @@ -415,8 +395,8 @@ private void usage(final String errorMsg) { System.err.println(" -D" + WALInputFormat.START_TIME_KEY + "=[date|ms]"); System.err.println(" -D" + WALInputFormat.END_TIME_KEY + "=[date|ms]"); System.err.println(" The start and the end date of timerange (inclusive). The dates can be"); - System.err.println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + - "format."); + System.err + .println(" expressed in milliseconds-since-epoch or yyyy-MM-dd'T'HH:mm:ss.SS " + "format."); System.err.println(" E.g. 
1234567890120 or 2009-02-13T23:32:30.12"); System.err.println("Other options:"); System.err.println(" -D" + JOB_NAME_CONF_KEY + "=jobName"); @@ -424,14 +404,12 @@ private void usage(final String errorMsg) { System.err.println(" -Dwal.input.separator=' '"); System.err.println(" Change WAL filename separator (WAL dir names use default ','.)"); System.err.println("For performance also consider the following options:\n" - + " -Dmapreduce.map.speculative=false\n" - + " -Dmapreduce.reduce.speculative=false"); + + " -Dmapreduce.map.speculative=false\n" + " -Dmapreduce.reduce.speculative=false"); } /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java index b1f15bade5cc..427d4a341f04 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/package-info.java @@ -1,26 +1,18 @@ /* - * - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** -Provides HBase MapReduce -Input/OutputFormats, a table indexing MapReduce job, and utility methods. - -

    See HBase and MapReduce -in the HBase Reference Guide for mapreduce over hbase documentation. -*/ + * Provides HBase MapReduce + * Input/OutputFormats, a table indexing MapReduce job, and utility methods. + *

    + * See HBase and MapReduce in the HBase + * Reference Guide for mapreduce over hbase documentation. + */ package org.apache.hadoop.hbase.mapreduce; diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java index de7954052d16..96e30fa7da5d 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/replication/VerifyReplication.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -70,20 +69,18 @@ import org.slf4j.LoggerFactory; /** - * This map-only job compares the data from a local table with a remote one. - * Every cell is compared and must have exactly the same keys (even timestamp) - * as well as same value. It is possible to restrict the job by time range and - * families. The peer id that's provided must match the one given when the - * replication stream was setup. + * This map-only job compares the data from a local table with a remote one. Every cell is compared + * and must have exactly the same keys (even timestamp) as well as same value. It is possible to + * restrict the job by time range and families. The peer id that's provided must match the one given + * when the replication stream was setup. *
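The reflowed package-info above only states what the package provides (Input/OutputFormats, a table indexing MapReduce job, and utility methods). A deliberately trivial table-mapper job wired up with TableMapReduceUtil is sketched below for orientation; the table name is hypothetical and the mapper does nothing useful, but the initTableMapperJob overload is the same one MobRefReporter uses later in this patch.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RowCountSketch {

  // Trivial mapper: emits one record per scanned row.
  static class CountMapper extends TableMapper<Text, LongWritable> {
    @Override
    protected void map(ImmutableBytesWritable row, Result value, Context context)
      throws IOException, InterruptedException {
      context.write(new Text("rows"), new LongWritable(1L));
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "row-count-sketch");
    job.setJarByClass(RowCountSketch.class);
    // Wires the table, the Scan and the mapper into the job.
    TableMapReduceUtil.initTableMapperJob(TableName.valueOf("MyTable"), new Scan(),
      CountMapper.class, Text.class, LongWritable.class, job);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}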

    - * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason - * for a why a row is different is shown in the map's log. + * Two counters are provided, Verifier.Counters.GOODROWS and BADROWS. The reason for a why a row is + * different is shown in the map's log. */ @InterfaceAudience.Private public class VerifyReplication extends Configured implements Tool { - private static final Logger LOG = - LoggerFactory.getLogger(VerifyReplication.class); + private static final Logger LOG = LoggerFactory.getLogger(VerifyReplication.class); public final static String NAME = "verifyrep"; private final static String PEER_CONFIG_PREFIX = NAME + ".peer."; @@ -100,29 +97,27 @@ public class VerifyReplication extends Configured implements Tool { int sleepMsBeforeReCompare = 0; boolean verbose = false; boolean includeDeletedCells = false; - //Source table snapshot name + // Source table snapshot name String sourceSnapshotName = null; - //Temp location in source cluster to restore source snapshot + // Temp location in source cluster to restore source snapshot String sourceSnapshotTmpDir = null; - //Peer table snapshot name + // Peer table snapshot name String peerSnapshotName = null; - //Temp location in peer cluster to restore peer snapshot + // Temp location in peer cluster to restore peer snapshot String peerSnapshotTmpDir = null; - //Peer cluster Hadoop FS address + // Peer cluster Hadoop FS address String peerFSAddress = null; - //Peer cluster HBase root dir location + // Peer cluster HBase root dir location String peerHBaseRootAddress = null; - //Peer Table Name + // Peer Table Name String peerTableName = null; - private final static String JOB_NAME_CONF_KEY = "mapreduce.job.name"; /** * Map-only comparator for 2 tables */ - public static class Verifier - extends TableMapper { + public static class Verifier extends TableMapper { public enum Counters { GOODROWS, BADROWS, ONLY_IN_SOURCE_TABLE_ROWS, ONLY_IN_PEER_TABLE_ROWS, CONTENT_DIFFERENT_ROWS @@ -140,22 +135,20 @@ public enum Counters { private int batch = -1; /** - * Map method that compares every scanned row with the equivalent from - * a distant cluster. - * @param row The current table row key. - * @param value The columns. - * @param context The current context. + * Map method that compares every scanned row with the equivalent from a distant cluster. + * @param row The current table row key. + * @param value The columns. + * @param context The current context. * @throws IOException When something is broken with the data. 
*/ @Override - public void map(ImmutableBytesWritable row, final Result value, - Context context) + public void map(ImmutableBytesWritable row, final Result value, Context context) throws IOException { if (replicatedScanner == null) { Configuration conf = context.getConfiguration(); - sleepMsBeforeReCompare = conf.getInt(NAME +".sleepMsBeforeReCompare", 0); + sleepMsBeforeReCompare = conf.getInt(NAME + ".sleepMsBeforeReCompare", 0); delimiter = conf.get(NAME + ".delimiter", ""); - verbose = conf.getBoolean(NAME +".verbose", false); + verbose = conf.getBoolean(NAME + ".verbose", false); batch = conf.getInt(NAME + ".batch", -1); final Scan scan = new Scan(); if (batch > 0) { @@ -166,9 +159,9 @@ public void map(ImmutableBytesWritable row, final Result value, long startTime = conf.getLong(NAME + ".startTime", 0); long endTime = conf.getLong(NAME + ".endTime", Long.MAX_VALUE); String families = conf.get(NAME + ".families", null); - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -177,7 +170,7 @@ public void map(ImmutableBytesWritable row, final Result value, String rowPrefixes = conf.get(NAME + ".rowPrefixes", null); setRowPrefixFilter(scan, rowPrefixes); scan.setTimeRange(startTime, endTime); - int versions = conf.getInt(NAME+".versions", -1); + int versions = conf.getInt(NAME + ".versions", -1); LOG.info("Setting number of version inside map as: " + versions); if (versions >= 0) { scan.setMaxVersions(versions); @@ -189,8 +182,8 @@ public void map(ImmutableBytesWritable row, final Result value, final InputSplit tableSplit = context.getInputSplit(); String zkClusterKey = conf.get(NAME + ".peerQuorumAddress"); - Configuration peerConf = HBaseConfiguration.createClusterConf(conf, - zkClusterKey, PEER_CONFIG_PREFIX); + Configuration peerConf = + HBaseConfiguration.createClusterConf(conf, zkClusterKey, PEER_CONFIG_PREFIX); String peerName = peerConf.get(NAME + ".peerTableName", tableName.getNameAsString()); TableName peerTableName = TableName.valueOf(peerName); @@ -215,12 +208,12 @@ public void map(ImmutableBytesWritable row, final Result value, String peerHBaseRootAddress = conf.get(NAME + ".peerHBaseRootAddress", null); FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerHBaseRootAddress)); - LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + - peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + - " peerFSAddress:" + peerFSAddress); + LOG.info("Using peer snapshot:" + peerSnapshotName + " with temp dir:" + + peerSnapshotTmpDir + " peer root uri:" + CommonFSUtils.getRootDir(peerConf) + + " peerFSAddress:" + peerFSAddress); replicatedScanner = new TableSnapshotScanner(peerConf, CommonFSUtils.getRootDir(peerConf), - new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true); + new Path(peerFSAddress, peerSnapshotTmpDir), peerSnapshotName, scan, true); } else { replicatedScanner = replicatedTable.getScanner(scan); } @@ -239,8 +232,8 @@ public void map(ImmutableBytesWritable row, final Result value, Result.compareResults(value, currentCompareRowInPeerTable, false); context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key: " + delimiter - + Bytes.toStringBinary(value.getRow()) + delimiter); + LOG.info( + "Good row key: " + delimiter + Bytes.toStringBinary(value.getRow()) + delimiter); } } catch (Exception e) { 
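The Verifier.map hunk above leans on Result.compareResults to decide between the GOODROWS and BADROWS counters. A standalone sketch of that contract, outside MapReduce, is below; the cluster key, table and row key are made up, and the two-argument compareResults overload is used here rather than the three-argument form in the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CompareOneRow {
  public static void main(String[] args) throws Exception {
    Configuration localConf = HBaseConfiguration.create();
    // Hypothetical peer cluster key; VerifyReplication derives this from the peer config.
    Configuration peerConf = HBaseConfiguration.createClusterConf(localConf,
      "cluster-b-1.example.com,cluster-b-2.example.com:2181:/cluster-b");
    byte[] row = Bytes.toBytes("some-row");
    try (Connection local = ConnectionFactory.createConnection(localConf);
      Connection peer = ConnectionFactory.createConnection(peerConf);
      Table localTable = local.getTable(TableName.valueOf("TestTable"));
      Table peerTable = peer.getTable(TableName.valueOf("TestTable"))) {
      Result a = localTable.get(new Get(row));
      Result b = peerTable.get(new Get(row));
      try {
        // Throws when keys, timestamps or values differ; this is the contract the
        // Verifier mapper relies on to split rows into GOODROWS and BADROWS.
        Result.compareResults(a, b);
        System.out.println("GOODROW " + Bytes.toStringBinary(row));
      } catch (Exception e) {
        System.out.println("BADROW " + Bytes.toStringBinary(row) + ": " + e.getMessage());
      }
    }
  }
}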
logFailRowAndIncreaseCounter(context, Counters.CONTENT_DIFFERENT_ROWS, value); @@ -270,21 +263,20 @@ private void logFailRowAndIncreaseCounter(Context context, Counters counter, Res if (!sourceResult.isEmpty()) { context.getCounter(Counters.GOODROWS).increment(1); if (verbose) { - LOG.info("Good row key (with recompare): " + delimiter + - Bytes.toStringBinary(row.getRow()) - + delimiter); + LOG.info("Good row key (with recompare): " + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } return; } catch (Exception e) { - LOG.error("recompare fail after sleep, rowkey=" + delimiter + - Bytes.toStringBinary(row.getRow()) + delimiter); + LOG.error("recompare fail after sleep, rowkey=" + delimiter + + Bytes.toStringBinary(row.getRow()) + delimiter); } } context.getCounter(counter).increment(1); context.getCounter(Counters.BADROWS).increment(1); - LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + - delimiter); + LOG.error(counter.toString() + ", rowkey=" + delimiter + Bytes.toStringBinary(row.getRow()) + + delimiter); } @Override @@ -311,7 +303,7 @@ protected void cleanup(Context context) { LOG.error("fail to close source table in cleanup", e); } } - if(sourceConnection != null){ + if (sourceConnection != null) { try { sourceConnection.close(); } catch (Exception e) { @@ -319,14 +311,14 @@ protected void cleanup(Context context) { } } - if(replicatedTable != null){ - try{ + if (replicatedTable != null) { + try { replicatedTable.close(); } catch (Exception e) { LOG.error("fail to close replicated table in cleanup", e); } } - if(replicatedConnection != null){ + if (replicatedConnection != null) { try { replicatedConnection.close(); } catch (Exception e) { @@ -336,8 +328,8 @@ protected void cleanup(Context context) { } } - private static Pair getPeerQuorumConfig( - final Configuration conf, String peerId) throws IOException { + private static Pair + getPeerQuorumConfig(final Configuration conf, String peerId) throws IOException { ZKWatcher localZKW = null; try { localZKW = new ZKWatcher(conf, "VerifyReplication", new Abortable() { @@ -351,7 +343,7 @@ public boolean isAborted() { } }); ReplicationPeerStorage storage = - ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); + ReplicationStorageFactory.getReplicationPeerStorage(localZKW, conf); ReplicationPeerConfig peerConfig = storage.getPeerConfig(peerId); return Pair.newPair(peerConfig, ReplicationUtils.getPeerClusterConfiguration(peerConfig, conf)); @@ -366,9 +358,9 @@ public boolean isAborted() { } private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorumAddress) - throws IOException { + throws IOException { Configuration peerConf = - HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); FileSystem.setDefaultUri(peerConf, peerFSAddress); CommonFSUtils.setRootDir(peerConf, new Path(peerFSAddress, peerHBaseRootAddress)); FileSystem fs = FileSystem.get(peerConf); @@ -378,30 +370,28 @@ private void restoreSnapshotForPeerCluster(Configuration conf, String peerQuorum /** * Sets up the actual job. - * - * @param conf The current configuration. - * @param args The command line parameters. + * @param conf The current configuration. + * @param args The command line parameters. * @return The newly created job. * @throws java.io.IOException When setting up the job fails. 
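getPeerQuorumConfig and restoreSnapshotForPeerCluster above both end up building a peer-side Configuration from a cluster key plus the "verifyrep.peer." override prefix. A small sketch of that behaviour, assuming hypothetical hostnames, is below; it relies on createClusterConf applying prefixed properties with the prefix stripped, which is the semantics the -D examples later in this file depend on.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class PeerConfSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical peer-side override, stored under the "verifyrep.peer." prefix so it
    // only affects the peer connection (same idea as the printUsage -D examples).
    conf.set("verifyrep.peer.hbase.security.authentication", "simple");
    String peerQuorum = "cluster-b-1.example.com,cluster-b-2.example.com:2181:/cluster-b";
    // The cluster key rewrites the ZooKeeper quorum/port/parent, and properties carrying
    // the prefix are then applied with the prefix removed.
    Configuration peerConf =
      HBaseConfiguration.createClusterConf(conf, peerQuorum, "verifyrep.peer.");
    System.out.println(peerConf.get("hbase.zookeeper.quorum"));
    System.out.println(peerConf.get("hbase.security.authentication"));
  }
}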
*/ - public Job createSubmittableJob(Configuration conf, String[] args) - throws IOException { + public Job createSubmittableJob(Configuration conf, String[] args) throws IOException { if (!doCommandLine(args)) { return null; } - conf.set(NAME+".tableName", tableName); - conf.setLong(NAME+".startTime", startTime); - conf.setLong(NAME+".endTime", endTime); - conf.setInt(NAME +".sleepMsBeforeReCompare", sleepMsBeforeReCompare); + conf.set(NAME + ".tableName", tableName); + conf.setLong(NAME + ".startTime", startTime); + conf.setLong(NAME + ".endTime", endTime); + conf.setInt(NAME + ".sleepMsBeforeReCompare", sleepMsBeforeReCompare); conf.set(NAME + ".delimiter", delimiter); conf.setInt(NAME + ".batch", batch); - conf.setBoolean(NAME +".verbose", verbose); - conf.setBoolean(NAME +".includeDeletedCells", includeDeletedCells); + conf.setBoolean(NAME + ".verbose", verbose); + conf.setBoolean(NAME + ".includeDeletedCells", includeDeletedCells); if (families != null) { - conf.set(NAME+".families", families); + conf.set(NAME + ".families", families); } - if (rowPrefixes != null){ - conf.set(NAME+".rowPrefixes", rowPrefixes); + if (rowPrefixes != null) { + conf.set(NAME + ".rowPrefixes", rowPrefixes); } String peerQuorumAddress; @@ -410,8 +400,8 @@ public Job createSubmittableJob(Configuration conf, String[] args) peerConfigPair = getPeerQuorumConfig(conf, peerId); ReplicationPeerConfig peerConfig = peerConfigPair.getFirst(); peerQuorumAddress = peerConfig.getClusterKey(); - LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + - peerConfig.getConfiguration()); + LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " + + peerConfig.getConfiguration()); conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress); HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX, peerConfig.getConfiguration().entrySet()); @@ -430,7 +420,7 @@ public Job createSubmittableJob(Configuration conf, String[] args) conf.setInt(NAME + ".versions", versions); LOG.info("Number of version: " + versions); - //Set Snapshot specific parameters + // Set Snapshot specific parameters if (peerSnapshotName != null) { conf.set(NAME + ".peerSnapshotName", peerSnapshotName); @@ -461,9 +451,9 @@ public Job createSubmittableJob(Configuration conf, String[] args) scan.setMaxVersions(versions); LOG.info("Number of versions set to " + versions); } - if(families != null) { + if (families != null) { String[] fams = families.split(","); - for(String fam : fams) { + for (String fam : fams) { scan.addFamily(Bytes.toBytes(fam)); } } @@ -486,8 +476,8 @@ public Job createSubmittableJob(Configuration conf, String[] args) assert peerConfigPair != null; peerClusterConf = peerConfigPair.getSecond(); } else { - peerClusterConf = HBaseConfiguration.createClusterConf(conf, - peerQuorumAddress, PEER_CONFIG_PREFIX); + peerClusterConf = + HBaseConfiguration.createClusterConf(conf, peerQuorumAddress, PEER_CONFIG_PREFIX); } // Obtain the auth token from peer cluster TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf); @@ -508,7 +498,7 @@ private static void setRowPrefixFilter(Scan scan, String rowPrefixes) { } scan.setFilter(filterList); byte[] startPrefixRow = Bytes.toBytes(rowPrefixArray[0]); - byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length -1]); + byte[] lastPrefixRow = Bytes.toBytes(rowPrefixArray[rowPrefixArray.length - 1]); setStartAndStopRows(scan, startPrefixRow, lastPrefixRow); } } @@ -516,7 +506,7 @@ private static void setRowPrefixFilter(Scan scan, String 
rowPrefixes) { private static void setStartAndStopRows(Scan scan, byte[] startPrefixRow, byte[] lastPrefixRow) { scan.setStartRow(startPrefixRow); byte[] stopRow = Bytes.add(Bytes.head(lastPrefixRow, lastPrefixRow.length - 1), - new byte[]{(byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1)}); + new byte[] { (byte) (lastPrefixRow[lastPrefixRow.length - 1] + 1) }); scan.setStopRow(stopRow); } @@ -570,7 +560,7 @@ public boolean doCommandLine(final String[] args) { } final String rowPrefixesKey = "--row-prefixes="; - if (cmd.startsWith(rowPrefixesKey)){ + if (cmd.startsWith(rowPrefixesKey)) { rowPrefixes = cmd.substring(rowPrefixesKey.length()); continue; } @@ -639,7 +629,7 @@ public boolean doCommandLine(final String[] args) { return false; } - if (i == args.length-2) { + if (i == args.length - 2) { if (isPeerQuorumAddress(cmd)) { peerQuorumAddress = cmd; } else { @@ -647,7 +637,7 @@ public boolean doCommandLine(final String[] args) { } } - if (i == args.length-1) { + if (i == args.length - 1) { tableName = cmd; } } @@ -674,7 +664,7 @@ public boolean doCommandLine(final String[] args) { if ((sourceSnapshotName != null || peerSnapshotName != null) && sleepMsBeforeReCompare > 0) { printUsage( "Using sleepMsBeforeReCompare along with snapshots is not allowed as snapshots are" - + " immutable"); + + " immutable"); return false; } @@ -697,7 +687,7 @@ private boolean isPeerQuorumAddress(String cmd) { } /* - * @param errorMsg Error message. Can be null. + * @param errorMsg Error message. Can be null. */ private static void printUsage(final String errorMsg) { if (errorMsg != null && errorMsg.length() > 0) { @@ -715,13 +705,13 @@ private static void printUsage(final String errorMsg) { System.err.println(" endtime end of the time range"); System.err.println(" versions number of cell versions to verify"); System.err.println(" batch batch count for scan, note that" - + " result row counts will no longer be actual number of rows when you use this option"); + + " result row counts will no longer be actual number of rows when you use this option"); System.err.println(" raw includes raw scan if given in options"); System.err.println(" families comma-separated list of families to copy"); System.err.println(" row-prefixes comma-separated list of row key prefixes to filter on "); System.err.println(" delimiter the delimiter used in display around rowkey"); - System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + - "default value is 0 which disables the recompare."); + System.err.println(" recomparesleep milliseconds to sleep before recompare row, " + + "default value is 0 which disables the recompare."); System.err.println(" verbose logs row keys of good rows"); System.err.println(" peerTableName Peer Table Name"); System.err.println(" sourceSnapshotName Source Snapshot Name"); @@ -733,63 +723,59 @@ private static void printUsage(final String errorMsg) { System.err.println(); System.err.println("Args:"); System.err.println(" peerid Id of the peer used for verification," - + " must match the one given for replication"); + + " must match the one given for replication"); System.err.println(" peerQuorumAddress quorumAdress of the peer used for verification. 
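A few hunks up, setRowPrefixFilter/setStartAndStopRows derive the scan range from the row-key prefixes by bumping the last byte of the highest prefix. A compact standalone version of that trick is below; the helper name is illustrative, and the 0xFF-overflow edge case is left unhandled, as in the original.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class PrefixRangeSketch {
  // Every key starting with a prefix in [firstPrefix, lastPrefix] falls inside
  // [start, stop) because the stop row is the last prefix with its final byte incremented.
  static Scan scanForPrefixRange(String firstPrefix, String lastPrefix) {
    byte[] start = Bytes.toBytes(firstPrefix);
    byte[] last = Bytes.toBytes(lastPrefix);
    byte[] stop = Bytes.add(Bytes.head(last, last.length - 1),
      new byte[] { (byte) (last[last.length - 1] + 1) });
    return new Scan().withStartRow(start).withStopRow(stop);
  }

  public static void main(String[] args) {
    Scan scan = scanForPrefixRange("user-a", "user-m");
    System.out.println(Bytes.toStringBinary(scan.getStartRow()) + " .. "
      + Bytes.toStringBinary(scan.getStopRow()));
  }
}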
The " - + "format is zk_quorum:zk_port:zk_hbase_path"); + + "format is zk_quorum:zk_port:zk_hbase_path"); System.err.println(" tablename Name of the table to verify"); System.err.println(); System.err.println("Examples:"); - System.err.println( - " To verify the data replicated from TestTable for a 1 hour window with peer #5 "); - System.err.println(" $ hbase " + - "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + - " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); + System.err + .println(" To verify the data replicated from TestTable for a 1 hour window with peer #5 "); + System.err + .println(" $ hbase " + "org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication" + + " --starttime=1265875194289 --endtime=1265878794289 5 TestTable "); System.err.println(); System.err.println( " To verify the data in TestTable between the cluster runs VerifyReplication and cluster-b"); System.err.println(" Assume quorum address for cluster-b is" - + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:2181:/cluster-b"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); System.err.println( " To verify the data in TestTable between the secured cluster runs VerifyReplication" - + " and insecure cluster-b"); - System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + + " and insecure cluster-b"); + System.err + .println(" $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=simple \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); - System.err.println(" To verify the data in TestTable between" + - " the secured cluster runs VerifyReplication and secured cluster-b"); - System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + - ", for master and regionserver kerberos principal from another cluster"); + System.err.println(" To verify the data in TestTable between" + + " the secured cluster runs VerifyReplication and secured cluster-b"); + System.err.println(" Assume cluster-b uses different kerberos principal, cluster-b/_HOST@E" + + ", for master and regionserver kerberos principal from another cluster"); System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + " $ hbase 
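The usage examples above show the command-line form; the same invocation can be driven from Java through ToolRunner. The sketch below mirrors the first example (a one-hour window against peer "5"); the timestamps and table name are taken verbatim from that usage text and are otherwise arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication;
import org.apache.hadoop.util.ToolRunner;

public class VerifyReplicationDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Mirrors the first usage example: a one-hour window against peer "5" and TestTable.
    int exit = ToolRunner.run(conf, new VerifyReplication(), new String[] {
      "--starttime=1265875194289", "--endtime=1265878794289", "5", "TestTable" });
    System.exit(exit);
  }
}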
org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); System.err.println(); System.err.println( " To verify the data in TestTable between the insecure cluster runs VerifyReplication" - + " and secured cluster-b"); + + " and secured cluster-b"); System.err.println( - " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + - " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + - " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" - + "cluster-b/_HOST@EXAMPLE.COM \\\n" + - " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + - " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" - + "2181:/cluster-b \\\n" + - " TestTable"); + " $ hbase org.apache.hadoop.hbase.mapreduce.replication.VerifyReplication \\\n" + + " -D verifyrep.peer.hbase.security.authentication=kerberos \\\n" + + " -D verifyrep.peer.hbase.regionserver.kerberos.principal=" + + "cluster-b/_HOST@EXAMPLE.COM \\\n" + + " -D verifyrep.peer.hbase.master.kerberos.principal=cluster-b/_HOST@EXAMPLE.COM \\\n" + + " cluster-b-1.example.com,cluster-b-2.example.com,cluster-b-3.example.com:" + + "2181:/cluster-b \\\n" + " TestTable"); } @Override @@ -804,8 +790,7 @@ public int run(String[] args) throws Exception { /** * Main entry point. - * - * @param args The command line parameters. + * @param args The command line parameters. * @throws Exception When running the job fails. */ public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java index 4152182a6d31..4a5bed9b8b18 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/MobRefReporter.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,13 +62,11 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** - * Scans a given table + CF for all mob reference cells to get the list of backing mob files. - * For each referenced file we attempt to verify that said file is on the FileSystem in a place - * that the MOB system will look when attempting to resolve the actual value. - * - * The job includes counters that can help provide a rough sketch of the mob data. + * Scans a given table + CF for all mob reference cells to get the list of backing mob files. For + * each referenced file we attempt to verify that said file is on the FileSystem in a place that the + * MOB system will look when attempting to resolve the actual value. The job includes counters that + * can help provide a rough sketch of the mob data. * *

      * Map-Reduce Framework
    @@ -95,30 +92,25 @@
      *         Number of rows with total size in the 1,000,000s of bytes=3162
      * 
    * - * * Map-Reduce Framework:Map input records - the number of rows with mob references - * * Map-Reduce Framework:Reduce output records - the number of unique hfiles referenced - * * MOB:NUM_CELLS - the total number of mob reference cells - * * PROBLEM:Affected rows - the number of rows that reference hfiles with an issue - * * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue - * * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the - * number of cells in a given row by grouping by the number of digits used in each count. - * This allows us to see more about the distribution of cells than what we can determine - * with just the cell count and the row count. In this particular example we can see that - * all of our rows have somewhere between 1 - 9 cells. - * * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order of - * magnitude of the number of rows in each of the hfiles with a problem. e.g. in the - * example there are 2 hfiles and they each have the same order of magnitude number of rows, - * specifically between 100 and 999. - * * SIZES OF CELLS: - this counter group gives a histogram of the order of magnitude of - * the size of mob values according to our reference cells. e.g. in the example above we - * have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this - * histogram we can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller - * and bigger ones are outliers making up less than 2% of mob cells. - * * SIZES OF ROWS: - this counter group gives a histogram of the order of magnitude of the - * size of mob values across each row according to our reference cells. In the example above - * we have rows that are are between 100,000 bytes and 9,999,999 bytes. We can also see that - * about 2/3rd of our rows are 100,000 - 999,999 bytes. - * + * * Map-Reduce Framework:Map input records - the number of rows with mob references * Map-Reduce + * Framework:Reduce output records - the number of unique hfiles referenced * MOB:NUM_CELLS - the + * total number of mob reference cells * PROBLEM:Affected rows - the number of rows that reference + * hfiles with an issue * PROBLEM:Problem MOB files - the number of unique hfiles that have an issue + * * CELLS PER ROW: - this counter group gives a histogram of the order of magnitude of the number + * of cells in a given row by grouping by the number of digits used in each count. This allows us to + * see more about the distribution of cells than what we can determine with just the cell count and + * the row count. In this particular example we can see that all of our rows have somewhere between + * 1 - 9 cells. * ROWS WITH PROBLEMS PER FILE: - this counter group gives a histogram of the order + * of magnitude of the number of rows in each of the hfiles with a problem. e.g. in the example + * there are 2 hfiles and they each have the same order of magnitude number of rows, specifically + * between 100 and 999. * SIZES OF CELLS: - this counter group gives a histogram of the order of + * magnitude of the size of mob values according to our reference cells. e.g. in the example above + * we have cell sizes that are all between 10,000 bytes and 9,999,999 bytes. From this histogram we + * can also see that _most_ cells are 100,000 - 999,000 bytes and the smaller and bigger ones are + * outliers making up less than 2% of mob cells. 
* SIZES OF ROWS: - this counter group gives a + * histogram of the order of magnitude of the size of mob values across each row according to our + * reference cells. In the example above we have rows that are are between 100,000 bytes and + * 9,999,999 bytes. We can also see that about 2/3rd of our rows are 100,000 - 999,999 bytes. * Generates a report that gives one file status per line, with tabs dividing fields. * *
    @@ -132,33 +124,28 @@
      * MISSING FILE    28e252d7f013973174750d483d358fa020191101f73536e7133f4cd3ab1065edf588d509        MmJiMjMyYzBiMTNjNzc0OTY1ZWY4NTU4ZjBmYmQ2MTUtNTIz,MmEzOGE0YTkzMTZjNDllNWE4MzM1MTdjNDVkMzEwNzAtODg=
      * 
    * - * Possible results are listed; the first three indicate things are working properly. - * * MOB DIR - the reference is in the normal MOB area for the given table and CF - * * HLINK TO ARCHIVE FOR SAME TABLE - the reference is present in the archive area for this - * table and CF - * * HLINK TO ARCHIVE FOR OTHER TABLE - the reference is present in a different table and CF, - * either in the MOB or archive areas (e.g. from a snapshot restore or clone) - * * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the reference is currently present in the archive - * area for this table and CF, but it is kept there because a _different_ table has a - * reference to it (e.g. from a snapshot clone). If these other tables are removed then - * the file will likely be deleted unless there is a snapshot also referencing it. - * * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for this table and - * CF, but there are no references present to prevent its removal. Unless it is newer than - * the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to - * cleaning. - * * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things failed while - * looking for why this file is being kept around. - * * MISSING FILE - We couldn't find the reference on the FileSystem. Either there is dataloss due - * to a bug in the MOB storage system or the MOB storage is damaged but in an edge case that - * allows it to work for now. You can verify which by doing a raw reference scan to get the - * referenced hfile and check the underlying filesystem. See the ref guide section on mob - * for details. - * * HLINK BUT POINT TO MISSING FILE - There is a pointer in our mob area for this table and CF - * to a file elsewhere on the FileSystem, however the file it points to no longer exists. - * * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS - We could not find the referenced file, - * however you should check the job logs to see why we couldn't check to see if there is a - * pointer to the referenced file in our archive or another table's archive or mob area. - * + * Possible results are listed; the first three indicate things are working properly. * MOB DIR - + * the reference is in the normal MOB area for the given table and CF * HLINK TO ARCHIVE FOR SAME + * TABLE - the reference is present in the archive area for this table and CF * HLINK TO ARCHIVE FOR + * OTHER TABLE - the reference is present in a different table and CF, either in the MOB or archive + * areas (e.g. from a snapshot restore or clone) * ARCHIVE WITH HLINK BUT NOT FROM OUR TABLE - the + * reference is currently present in the archive area for this table and CF, but it is kept there + * because a _different_ table has a reference to it (e.g. from a snapshot clone). If these other + * tables are removed then the file will likely be deleted unless there is a snapshot also + * referencing it. * ARCHIVE BUT NO HLINKS - the reference is currently present in the archive for + * this table and CF, but there are no references present to prevent its removal. Unless it is newer + * than the general TTL (default 5 minutes) or referenced in a snapshot it will be subject to + * cleaning. * ARCHIVE BUT FAILURE WHILE CHECKING HLINKS - Check the job logs to see why things + * failed while looking for why this file is being kept around. * MISSING FILE - We couldn't find + * the reference on the FileSystem. 
Either there is dataloss due to a bug in the MOB storage system + * or the MOB storage is damaged but in an edge case that allows it to work for now. You can verify + * which by doing a raw reference scan to get the referenced hfile and check the underlying + * filesystem. See the ref guide section on mob for details. * HLINK BUT POINT TO MISSING FILE - + * There is a pointer in our mob area for this table and CF to a file elsewhere on the FileSystem, + * however the file it points to no longer exists. * MISSING FILE BUT FAILURE WHILE CHECKING HLINKS + * - We could not find the referenced file, however you should check the job logs to see why we + * couldn't check to see if there is a pointer to the referenced file in our archive or another + * table's archive or mob area. */ @InterfaceAudience.Private public class MobRefReporter extends Configured implements Tool { @@ -169,8 +156,8 @@ public class MobRefReporter extends Configured implements Tool { public static class MobRefMapper extends TableMapper { @Override - public void map(ImmutableBytesWritable r, Result columns, Context context) throws IOException, - InterruptedException { + public void map(ImmutableBytesWritable r, Result columns, Context context) + throws IOException, InterruptedException { if (columns == null) { return; } @@ -190,24 +177,29 @@ public void map(ImmutableBytesWritable r, Result columns, Context context) throw files.add(fileName); } final int cellsize = MobUtils.getMobValueLength(c); - context.getCounter("SIZES OF CELLS", "Number of cells with size in the " + - log10GroupedString(cellsize) + "s of bytes").increment(1L); + context + .getCounter("SIZES OF CELLS", + "Number of cells with size in the " + log10GroupedString(cellsize) + "s of bytes") + .increment(1L); size += cellsize; count++; } else { LOG.debug("cell is not a mob ref, even though we asked for only refs. 
cell={}", c); } } - context.getCounter("CELLS PER ROW", "Number of rows with " + log10GroupedString(count) + - "s of cells per row").increment(1L); - context.getCounter("SIZES OF ROWS", "Number of rows with total size in the " + - log10GroupedString(size) + "s of bytes").increment(1L); - context.getCounter("MOB","NUM_CELLS").increment(count); + context + .getCounter("CELLS PER ROW", + "Number of rows with " + log10GroupedString(count) + "s of cells per row") + .increment(1L); + context + .getCounter("SIZES OF ROWS", + "Number of rows with total size in the " + log10GroupedString(size) + "s of bytes") + .increment(1L); + context.getCounter("MOB", "NUM_CELLS").increment(count); } } - public static class MobRefReducer extends - Reducer { + public static class MobRefReducer extends Reducer { TableName table; String mobRegion; @@ -246,7 +238,7 @@ public void setup(Context context) throws IOException, InterruptedException { mob = MobUtils.getMobFamilyPath(conf, table, family); LOG.info("Using active mob area '{}'", mob); archive = HFileArchiveUtil.getStoreArchivePath(conf, table, - MobUtils.getMobRegionInfo(table).getEncodedName(), family); + MobUtils.getMobRegionInfo(table).getEncodedName(), family); LOG.info("Using archive mob area '{}'", archive); seperator = conf.get(TextOutputFormat.SEPERATOR, "\t"); } @@ -260,7 +252,7 @@ public void reduce(Text key, Iterable rows, Context cont if (mob.getFileSystem(conf).exists(new Path(mob, file))) { LOG.debug("Found file '{}' in mob area", file); context.write(OK_MOB_DIR, key); - // archive area - is there an hlink back reference (from a snapshot from same table) + // archive area - is there an hlink back reference (from a snapshot from same table) } else if (archive.getFileSystem(conf).exists(new Path(archive, file))) { Path backRefDir = HFileLink.getBackReferencesDir(archive, file); @@ -269,37 +261,41 @@ public void reduce(Text key, Iterable rows, Context cont if (backRefs != null) { boolean found = false; for (FileStatus backRef : backRefs) { - Pair refParts = HFileLink.parseBackReferenceName( - backRef.getPath().getName()); + Pair refParts = + HFileLink.parseBackReferenceName(backRef.getPath().getName()); if (table.equals(refParts.getFirst()) && mobRegion.equals(refParts.getSecond())) { Path hlinkPath = HFileLink.getHFileFromBackReference(MobUtils.getMobHome(conf), - backRef.getPath()); + backRef.getPath()); if (hlinkPath.getFileSystem(conf).exists(hlinkPath)) { found = true; } else { LOG.warn("Found file '{}' in archive area with a back reference to the mob area " + "for our table, but the mob area does not have a corresponding hfilelink.", - file); + file); } } } if (found) { LOG.debug("Found file '{}' in archive area. has proper hlink back references to " - + "suggest it is from a restored snapshot for this table.", file); + + "suggest it is from a restored snapshot for this table.", + file); context.write(OK_HLINK_RESTORE, key); } else { LOG.warn("Found file '{}' in archive area, but the hlink back references do not " - + "properly point to the mob area for our table.", file); + + "properly point to the mob area for our table.", + file); context.write(INCONSISTENT_ARCHIVE_BAD_LINK, encodeRows(context, key, rows)); } } else { LOG.warn("Found file '{}' in archive area, but there are no hlinks pointing to it. 
Not " - + "yet used snapshot or an error.", file); + + "yet used snapshot or an error.", + file); context.write(INCONSISTENT_ARCHIVE_STALE, encodeRows(context, key, rows)); } } catch (IOException e) { LOG.warn("Found file '{}' in archive area, but got an error while checking " - + "on back references.", file, e); + + "on back references.", + file, e); context.write(INCONSISTENT_ARCHIVE_IOE, encodeRows(context, key, rows)); } @@ -307,19 +303,19 @@ public void reduce(Text key, Iterable rows, Context cont // check for an hlink in the active mob area (from a snapshot of a different table) try { /** - * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because - * we know the mob region never splits, so we can only have HFileLink references - * and looking for just them is cheaper then listing everything. - * - * This glob should match the naming convention for HFileLinks to our referenced hfile. - * As simplified explanation those file names look like "table=region-hfile". For details - * see the {@link HFileLink#createHFileLinkName HFileLink implementation}. + * we are doing this ourselves instead of using FSUtils.getReferenceFilePaths because we + * know the mob region never splits, so we can only have HFileLink references and looking + * for just them is cheaper then listing everything. This glob should match the naming + * convention for HFileLinks to our referenced hfile. As simplified explanation those file + * names look like "table=region-hfile". For details see the + * {@link HFileLink#createHFileLinkName HFileLink implementation}. */ FileStatus[] hlinks = mob.getFileSystem(conf).globStatus(new Path(mob + "/*=*-" + file)); if (hlinks != null && hlinks.length != 0) { if (hlinks.length != 1) { - LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + - "one: {}", file, Arrays.deepToString(hlinks)); + LOG.warn("Found file '{}' as hfilelinks in the mob area, but there are more than " + + "one: {}", + file, Arrays.deepToString(hlinks)); } HFileLink found = null; for (FileStatus hlink : hlinks) { @@ -336,7 +332,8 @@ public void reduce(Text key, Iterable rows, Context cont context.write(OK_HLINK_CLONE, key); } else { LOG.warn("Found file '{}' as ref(s) in the mob area but they do not point to an hfile" - + " that exists.", file); + + " that exists.", + file); context.write(DATALOSS_HLINK_DANGLING, encodeRows(context, key, rows)); } } else { @@ -352,8 +349,8 @@ public void reduce(Text key, Iterable rows, Context cont } } catch (IOException e) { LOG.error( - "Exception while checking mob area of our table for HFileLinks that point to {}", - file, e); + "Exception while checking mob area of our table for HFileLinks that point to {}", file, + e); context.write(DATALOSS_MISSING_IOE, encodeRows(context, key, rows)); } } @@ -379,25 +376,27 @@ private Text encodeRows(Context context, Text key, Iterable 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. + * Returns the string representation of the given number after grouping it into log10 buckets. + * e.g. 0-9 -> 1, 10-99 -> 10, ..., 100,000-999,999 -> 100,000, etc. */ static String log10GroupedString(long number) { - return String.format("%,d", (long)(Math.pow(10d, Math.floor(Math.log10(number))))); + return String.format("%,d", (long) (Math.pow(10d, Math.floor(Math.log10(number))))); } /** * Main method for the tool. - * @return 0 if success, 1 for bad args. 2 if job aborted with an exception, - * 3 if mr job was unsuccessful + * @return 0 if success, 1 for bad args. 
2 if job aborted with an exception, 3 if mr job was + * unsuccessful */ public int run(String[] args) throws IOException, InterruptedException { // TODO make family and table optional @@ -427,7 +426,7 @@ public int run(String[] args) throws IOException, InterruptedException { } } else { LOG.error("The passed configs point to an HBase dir does not exist: {}", - conf.get(HConstants.HBASE_DIR)); + conf.get(HConstants.HBASE_DIR)); throw new IOException("The target HBase does not exist"); } @@ -435,7 +434,7 @@ public int run(String[] args) throws IOException, InterruptedException { int maxVersions; TableName tn = TableName.valueOf(tableName); try (Connection connection = ConnectionFactory.createConnection(conf); - Admin admin = connection.getAdmin()) { + Admin admin = connection.getAdmin()) { TableDescriptor htd = admin.getDescriptor(tn); ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(familyName)); if (hcd == null || !hcd.isMobEnabled()) { @@ -445,7 +444,6 @@ public int run(String[] args) throws IOException, InterruptedException { maxVersions = hcd.getMaxVersions(); } - String id = getClass().getSimpleName() + UUID.randomUUID().toString().replace("-", ""); Job job = null; Scan scan = new Scan(); @@ -462,8 +460,8 @@ public int run(String[] args) throws IOException, InterruptedException { job = Job.getInstance(conf); job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob(tn, scan, - MobRefMapper.class, Text.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(tn, scan, MobRefMapper.class, Text.class, + ImmutableBytesWritable.class, job); job.setReducerClass(MobRefReducer.class); job.setOutputFormatClass(TextOutputFormat.class); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java index fd09e34fde16..837b682a4c3b 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/regionserver/CompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -66,12 +66,9 @@ import org.slf4j.LoggerFactory; /* - * The CompactionTool allows to execute a compaction specifying a: - *
      - *
    • table folder (all regions and families will be compacted) - *
    • region folder (all families in the region will be compacted) - *
    • family folder (the store files will be compacted) - *
    + * The CompactionTool allows to execute a compaction specifying a:
    • table folder (all + * regions and families will be compacted)
    • region folder (all families in the region will be + * compacted)
    • family folder (the store files will be compacted)
    */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class CompactionTool extends Configured implements Tool { @@ -82,8 +79,8 @@ public class CompactionTool extends Configured implements Tool { private final static String CONF_DELETE_COMPACTED = "hbase.compactiontool.delete"; /** - * Class responsible to execute the Compaction on the specified path. - * The path can be a table, region or family directory. + * Class responsible to execute the Compaction on the specified path. The path can be a table, + * region or family directory. */ private static class CompactionWorker { private final boolean deleteCompacted; @@ -98,7 +95,6 @@ public CompactionWorker(final FileSystem fs, final Configuration conf) { /** * Execute the compaction on the specified path. - * * @param path Directory path on which to run compaction. * @param compactOnce Execute just a single step of compaction. * @param major Request major compaction. @@ -110,8 +106,7 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma Path tableDir = regionDir.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - compactStoreFiles(tableDir, htd, hri, - path.getName(), compactOnce, major); + compactStoreFiles(tableDir, htd, hri, path.getName(), compactOnce, major); } else if (isRegionDir(fs, path)) { Path tableDir = path.getParent(); TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); @@ -120,39 +115,36 @@ public void compact(final Path path, final boolean compactOnce, final boolean ma compactTable(path, compactOnce, major); } else { throw new IOException( - "Specified path is not a table, region or family directory. path=" + path); + "Specified path is not a table, region or family directory. path=" + path); } } private void compactTable(final Path tableDir, final boolean compactOnce, final boolean major) throws IOException { TableDescriptor htd = FSTableDescriptors.getTableDescriptorFromFs(fs, tableDir); - for (Path regionDir: FSUtils.getRegionDirs(fs, tableDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, tableDir)) { compactRegion(tableDir, htd, regionDir, compactOnce, major); } } - private void compactRegion(final Path tableDir, final TableDescriptor htd, - final Path regionDir, final boolean compactOnce, final boolean major) - throws IOException { + private void compactRegion(final Path tableDir, final TableDescriptor htd, final Path regionDir, + final boolean compactOnce, final boolean major) throws IOException { RegionInfo hri = HRegionFileSystem.loadRegionInfoFileContent(fs, regionDir); - for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) { + for (Path familyDir : FSUtils.getFamilyDirs(fs, regionDir)) { compactStoreFiles(tableDir, htd, hri, familyDir.getName(), compactOnce, major); } } /** - * Execute the actual compaction job. - * If the compact once flag is not specified, execute the compaction until - * no more compactions are needed. Uses the Configuration settings provided. + * Execute the actual compaction job. If the compact once flag is not specified, execute the + * compaction until no more compactions are needed. Uses the Configuration settings provided. 
*/ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, final RegionInfo hri, final String familyName, final boolean compactOnce, final boolean major) throws IOException { HStore store = getStore(conf, fs, tableDir, htd, hri, familyName); - LOG.info("Compact table=" + htd.getTableName() + - " region=" + hri.getRegionNameAsString() + - " family=" + familyName); + LOG.info("Compact table=" + htd.getTableName() + " region=" + hri.getRegionNameAsString() + + " family=" + familyName); if (major) { store.triggerMajorCompaction(); } @@ -166,13 +158,13 @@ private void compactStoreFiles(final Path tableDir, final TableDescriptor htd, store.compact(compaction.get(), NoLimitThroughputController.INSTANCE, null); if (storeFiles != null && !storeFiles.isEmpty()) { if (deleteCompacted) { - for (HStoreFile storeFile: storeFiles) { + for (HStoreFile storeFile : storeFiles) { fs.delete(storeFile.getPath(), false); } } } } while (store.needsCompaction() && !compactOnce); - //We need to close the store properly, to make sure it will archive compacted files + // We need to close the store properly, to make sure it will archive compacted files store.close(); } @@ -236,8 +228,8 @@ protected boolean isSplitable(JobContext context, Path file) { } /** - * Returns a split for each store files directory using the block location - * of each file as locality reference. + * Returns a split for each store files directory using the block location of each file as + * locality reference. */ @Override public List getSplits(JobContext job) throws IOException { @@ -245,7 +237,7 @@ public List getSplits(JobContext job) throws IOException { List files = listStatus(job); Text key = new Text(); - for (FileStatus file: files) { + for (FileStatus file : files) { Path path = file.getPath(); FileSystem fs = path.getFileSystem(job.getConfiguration()); LineReader reader = new LineReader(fs.open(path)); @@ -276,9 +268,9 @@ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) } HDFSBlocksDistribution hdfsBlocksDistribution = new HDFSBlocksDistribution(); - for (FileStatus hfileStatus: files) { + for (FileStatus hfileStatus : files) { HDFSBlocksDistribution storeFileBlocksDistribution = - FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen()); + FSUtils.computeHDFSBlocksDistribution(fs, hfileStatus, 0, hfileStatus.getLen()); hdfsBlocksDistribution.add(storeFileBlocksDistribution); } @@ -287,27 +279,26 @@ private static String[] getStoreDirHosts(final FileSystem fs, final Path path) } /** - * Create the input file for the given directories to compact. - * The file is a TextFile with each line corrisponding to a - * store files directory to compact. + * Create the input file for the given directories to compact. The file is a TextFile with each + * line corrisponding to a store files directory to compact. 
*/ public static List createInputFile(final FileSystem fs, final FileSystem stagingFs, final Path path, final Set toCompactDirs) throws IOException { // Extract the list of store dirs List storeDirs = new LinkedList<>(); - for (Path compactDir: toCompactDirs) { + for (Path compactDir : toCompactDirs) { if (isFamilyDir(fs, compactDir)) { storeDirs.add(compactDir); } else if (isRegionDir(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, compactDir)); } else if (isTableDir(fs, compactDir)) { // Lookup regions - for (Path regionDir: FSUtils.getRegionDirs(fs, compactDir)) { + for (Path regionDir : FSUtils.getRegionDirs(fs, compactDir)) { storeDirs.addAll(FSUtils.getFamilyDirs(fs, regionDir)); } } else { throw new IOException( - "Specified path is not a table, region or family directory. path=" + compactDir); + "Specified path is not a table, region or family directory. path=" + compactDir); } } @@ -316,7 +307,7 @@ public static List createInputFile(final FileSystem fs, final FileSystem s LOG.info("Create input file=" + path + " with " + storeDirs.size() + " dirs to compact."); try { final byte[] newLine = Bytes.toBytes("\n"); - for (Path storeDir: storeDirs) { + for (Path storeDir : storeDirs) { stream.write(Bytes.toBytes(storeDir.toString())); stream.write(newLine); } @@ -352,16 +343,16 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, FileSystem stagingFs = stagingDir.getFileSystem(conf); try { // Create input file with the store dirs - Path inputPath = new Path(stagingDir, "compact-"+ EnvironmentEdgeManager.currentTime()); - List storeDirs = CompactionInputFormat.createInputFile(fs, stagingFs, - inputPath, toCompactDirs); + Path inputPath = new Path(stagingDir, "compact-" + EnvironmentEdgeManager.currentTime()); + List storeDirs = + CompactionInputFormat.createInputFile(fs, stagingFs, inputPath, toCompactDirs); CompactionInputFormat.addInputPath(job, inputPath); // Initialize credential for secure cluster TableMapReduceUtil.initCredentials(job); // Despite the method name this will get delegation token for the filesystem - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - storeDirs.toArray(new Path[0]), conf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), storeDirs.toArray(new Path[0]), + conf); // Start the MR Job and wait return job.waitForCompletion(true) ? 0 : 1; @@ -376,7 +367,7 @@ private int doMapReduce(final FileSystem fs, final Set toCompactDirs, private int doClient(final FileSystem fs, final Set toCompactDirs, final boolean compactOnce, final boolean major) throws IOException { CompactionWorker worker = new CompactionWorker(fs, getConf()); - for (Path path: toCompactDirs) { + for (Path path : toCompactDirs) { worker.compact(path, compactOnce, major); } return 0; @@ -449,16 +440,17 @@ private void printUsage(final String message) { System.err.println(); System.err.println("Note: -D properties will be applied to the conf used. 
"); System.err.println("For example: "); - System.err.println(" To stop delete of compacted file, pass -D"+CONF_DELETE_COMPACTED+"=false"); + System.err + .println(" To stop delete of compacted file, pass -D" + CONF_DELETE_COMPACTED + "=false"); System.err.println(); System.err.println("Examples:"); System.err.println(" To compact the full 'TestTable' using MapReduce:"); - System.err.println(" $ hbase " + this.getClass().getName() + - " -mapred hdfs://hbase/data/default/TestTable"); + System.err.println( + " $ hbase " + this.getClass().getName() + " -mapred hdfs://hbase/data/default/TestTable"); System.err.println(); System.err.println(" To compact column family 'x' of the table 'TestTable' region 'abc':"); - System.err.println(" $ hbase " + this.getClass().getName() + - " hdfs://hbase/data/default/TestTable/abc/x"); + System.err.println( + " $ hbase " + this.getClass().getName() + " hdfs://hbase/data/default/TestTable/abc/x"); } public static void main(String[] args) throws Exception { diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java index f586cb215f82..509e5773bfd7 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/snapshot/ExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.snapshot; import java.io.BufferedInputStream; @@ -85,11 +84,9 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotRegionManifest; /** - * Export the specified snapshot to a given FileSystem. - * - * The .snapshot/name folder is copied to the destination cluster - * and then all the hfiles/wals are copied using a Map-Reduce Job in the .archive/ location. - * When everything is done, the second cluster can restore the snapshot. + * Export the specified snapshot to a given FileSystem. The .snapshot/name folder is copied to the + * destination cluster and then all the hfiles/wals are copied using a Map-Reduce Job in the + * .archive/ location. When everything is done, the second cluster can restore the snapshot. */ @InterfaceAudience.Public public class ExportSnapshot extends AbstractHBaseTool implements Tool { @@ -131,40 +128,40 @@ static class Testing { // Command line options and defaults. 
static final class Options { static final Option SNAPSHOT = new Option(null, "snapshot", true, "Snapshot to restore."); - static final Option TARGET_NAME = new Option(null, "target", true, - "Target name for the snapshot."); - static final Option COPY_TO = new Option(null, "copy-to", true, "Remote " - + "destination hdfs://"); - static final Option COPY_FROM = new Option(null, "copy-from", true, - "Input folder hdfs:// (default hbase.rootdir)"); + static final Option TARGET_NAME = + new Option(null, "target", true, "Target name for the snapshot."); + static final Option COPY_TO = + new Option(null, "copy-to", true, "Remote " + "destination hdfs://"); + static final Option COPY_FROM = + new Option(null, "copy-from", true, "Input folder hdfs:// (default hbase.rootdir)"); static final Option NO_CHECKSUM_VERIFY = new Option(null, "no-checksum-verify", false, "Do not verify checksum, use name+length only."); static final Option NO_TARGET_VERIFY = new Option(null, "no-target-verify", false, "Do not verify the integrity of the exported snapshot."); - static final Option NO_SOURCE_VERIFY = new Option(null, "no-source-verify", false, - "Do not verify the source of the snapshot."); - static final Option OVERWRITE = new Option(null, "overwrite", false, - "Rewrite the snapshot manifest if already exists."); - static final Option CHUSER = new Option(null, "chuser", true, - "Change the owner of the files to the specified one."); - static final Option CHGROUP = new Option(null, "chgroup", true, - "Change the group of the files to the specified one."); - static final Option CHMOD = new Option(null, "chmod", true, - "Change the permission of the files to the specified one."); + static final Option NO_SOURCE_VERIFY = + new Option(null, "no-source-verify", false, "Do not verify the source of the snapshot."); + static final Option OVERWRITE = + new Option(null, "overwrite", false, "Rewrite the snapshot manifest if already exists."); + static final Option CHUSER = + new Option(null, "chuser", true, "Change the owner of the files to the specified one."); + static final Option CHGROUP = + new Option(null, "chgroup", true, "Change the group of the files to the specified one."); + static final Option CHMOD = + new Option(null, "chmod", true, "Change the permission of the files to the specified one."); static final Option MAPPERS = new Option(null, "mappers", true, "Number of mappers to use during the copy (mapreduce.job.maps)."); - static final Option BANDWIDTH = new Option(null, "bandwidth", true, - "Limit bandwidth to this value in MB/second."); + static final Option BANDWIDTH = + new Option(null, "bandwidth", true, "Limit bandwidth to this value in MB/second."); } // Export Map-Reduce Counters, to keep track of the progress public enum Counter { - MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, - BYTES_EXPECTED, BYTES_SKIPPED, BYTES_COPIED + MISSING_FILES, FILES_COPIED, FILES_SKIPPED, COPY_FAILED, BYTES_EXPECTED, BYTES_SKIPPED, + BYTES_COPIED } - private static class ExportMapper extends Mapper { + private static class ExportMapper + extends Mapper { private static final Logger LOG = LoggerFactory.getLogger(ExportMapper.class); final static int REPORT_SIZE = 1 * 1024 * 1024; final static int BUFFER_SIZE = 64 * 1024; @@ -196,7 +193,7 @@ public void setup(Context context) throws IOException { filesGroup = conf.get(CONF_FILES_GROUP); filesUser = conf.get(CONF_FILES_USER); - filesMode = (short)conf.getInt(CONF_FILES_MODE, 0); + filesMode = (short) conf.getInt(CONF_FILES_MODE, 0); outputRoot = new 
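The Options block above lists the long option names ExportSnapshot accepts. A minimal programmatic invocation might look like the following; the snapshot name, destination URI, mapper count and bandwidth cap are all hypothetical values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Long option names come from the Options block; values here are placeholders.
    int exit = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
      "--snapshot", "MySnapshot",
      "--copy-to", "hdfs://backup-cluster:8020/hbase",
      "--mappers", "16",
      "--bandwidth", "100" });
    System.exit(exit);
  }
}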
Path(conf.get(CONF_OUTPUT_ROOT)); inputRoot = new Path(conf.get(CONF_INPUT_ROOT)); @@ -214,7 +211,7 @@ public void setup(Context context) throws IOException { destConf.setBoolean("fs." + outputRoot.toUri().getScheme() + ".impl.disable.cache", true); outputFs = FileSystem.get(outputRoot.toUri(), destConf); } catch (IOException e) { - throw new IOException("Could not get the output FileSystem with root="+ outputRoot, e); + throw new IOException("Could not get the output FileSystem with root=" + outputRoot, e); } // Use the default block size of the outputFs if bigger @@ -257,7 +254,7 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException case HFILE: Path inputPath = new Path(inputInfo.getHfile()); String family = inputPath.getParent().getName(); - TableName table =HFileLink.getReferencedTableName(inputPath.getName()); + TableName table = HFileLink.getReferencedTableName(inputPath.getName()); String region = HFileLink.getReferencedRegionName(inputPath.getName()); String hfile = HFileLink.getReferencedHFileName(inputPath.getName()); path = new Path(CommonFSUtils.getTableDir(new Path("./"), table), @@ -274,8 +271,8 @@ private Path getOutputPath(final SnapshotFileInfo inputInfo) throws IOException @SuppressWarnings("checkstyle:linelength") /** - * Used by TestExportSnapshot to test for retries when failures happen. - * Failure is injected in {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. + * Used by TestExportSnapshot to test for retries when failures happen. Failure is injected in + * {@link #copyFile(Mapper.Context, org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotFileInfo, Path)}. */ private void injectTestFailure(final Context context, final SnapshotFileInfo inputInfo) throws IOException { @@ -285,7 +282,7 @@ private void injectTestFailure(final Context context, final SnapshotFileInfo inp context.getCounter(Counter.COPY_FAILED).increment(1); LOG.debug("Injecting failure. Count: " + testing.injectedFailureCount); throw new IOException(String.format("TEST FAILURE (%d of max %d): Unable to copy input=%s", - testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); + testing.injectedFailureCount, testing.failuresCountToInject, inputInfo)); } private void copyFile(final Context context, final SnapshotFileInfo inputInfo, @@ -357,10 +354,8 @@ private void createOutputPath(final Path path) throws IOException { /** * Try to Preserve the files attribute selected by the user copying them from the source file * This is only required when you are exporting as a different user than "hbase" or on a system - * that doesn't have the "hbase" user. - * - * This is not considered a blocking failure since the user can force a chmod with the user - * that knows is available on the system. + * that doesn't have the "hbase" user. This is not considered a blocking failure since the user + * can force a chmod with the user that knows is available on the system. 
*/ private boolean preserveAttributes(final Path path, final FileStatus refStat) { FileStatus stat; @@ -378,7 +373,7 @@ private boolean preserveAttributes(final Path path, final FileStatus refStat) { outputFs.setPermission(path, refStat.getPermission()); } } catch (IOException e) { - LOG.warn("Unable to set the permission for file="+ stat.getPath() +": "+ e.getMessage()); + LOG.warn("Unable to set the permission for file=" + stat.getPath() + ": " + e.getMessage()); return false; } @@ -391,9 +386,10 @@ private boolean preserveAttributes(final Path path, final FileStatus refStat) { outputFs.setOwner(path, user, group); } } catch (IOException e) { - LOG.warn("Unable to set the owner/group for file="+ stat.getPath() +": "+ e.getMessage()); - LOG.warn("The user/group may not exist on the destination cluster: user=" + - user + " group=" + group); + LOG.warn( + "Unable to set the owner/group for file=" + stat.getPath() + ": " + e.getMessage()); + LOG.warn("The user/group may not exist on the destination cluster: user=" + user + + " group=" + group); return false; } } @@ -405,13 +401,11 @@ private boolean stringIsNotEmpty(final String str) { return str != null && str.length() > 0; } - private void copyData(final Context context, - final Path inputPath, final InputStream in, - final Path outputPath, final FSDataOutputStream out, - final long inputFileSize) + private void copyData(final Context context, final Path inputPath, final InputStream in, + final Path outputPath, final FSDataOutputStream out, final long inputFileSize) throws IOException { - final String statusMessage = "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + - " (%.1f%%)"; + final String statusMessage = + "copied %s/" + StringUtils.humanReadableInt(inputFileSize) + " (%.1f%%)"; try { byte[] buffer = new byte[bufferSize]; @@ -427,33 +421,33 @@ private void copyData(final Context context, if (reportBytes >= REPORT_SIZE) { context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context.setStatus( + String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + + " to " + outputPath); reportBytes = 0; } } long etime = EnvironmentEdgeManager.currentTime(); context.getCounter(Counter.BYTES_COPIED).increment(reportBytes); - context.setStatus(String.format(statusMessage, - StringUtils.humanReadableInt(totalBytesWritten), - (totalBytesWritten/(float)inputFileSize) * 100.0f) + - " from " + inputPath + " to " + outputPath); + context + .setStatus(String.format(statusMessage, StringUtils.humanReadableInt(totalBytesWritten), + (totalBytesWritten / (float) inputFileSize) * 100.0f) + " from " + inputPath + " to " + + outputPath); // Verify that the written size match if (totalBytesWritten != inputFileSize) { - String msg = "number of bytes copied not matching copied=" + totalBytesWritten + - " expected=" + inputFileSize + " for file=" + inputPath; + String msg = "number of bytes copied not matching copied=" + totalBytesWritten + + " expected=" + inputFileSize + " for file=" + inputPath; throw new IOException(msg); } LOG.info("copy completed for input=" + inputPath + " output=" + outputPath); - LOG.info("size=" + totalBytesWritten + - " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" + - " time=" + 
StringUtils.formatTimeDiff(etime, stime) + - String.format(" %.3fM/sec", (totalBytesWritten / ((etime - stime)/1000.0))/1048576.0)); + LOG.info( + "size=" + totalBytesWritten + " (" + StringUtils.humanReadableInt(totalBytesWritten) + ")" + + " time=" + StringUtils.formatTimeDiff(etime, stime) + String.format(" %.3fM/sec", + (totalBytesWritten / ((etime - stime) / 1000.0)) / 1048576.0)); context.getCounter(Counter.FILES_COPIED).increment(1); } catch (IOException e) { LOG.error("Error copying " + inputPath + " to " + outputPath, e); @@ -463,12 +457,11 @@ private void copyData(final Context context, } /** - * Try to open the "source" file. - * Throws an IOException if the communication with the inputFs fail or - * if the file is not found. + * Try to open the "source" file. Throws an IOException if the communication with the inputFs + * fail or if the file is not found. */ private FSDataInputStream openSourceFile(Context context, final SnapshotFileInfo fileInfo) - throws IOException { + throws IOException { try { Configuration conf = context.getConfiguration(); FileLink link = null; @@ -520,12 +513,12 @@ private FileStatus getSourceFileStatus(Context context, final SnapshotFileInfo f } } - private FileLink getFileLink(Path path, Configuration conf) throws IOException{ + private FileLink getFileLink(Path path, Configuration conf) throws IOException { String regionName = HFileLink.getReferencedRegionName(path.getName()); TableName tableName = HFileLink.getReferencedTableName(path.getName()); - if(MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { + if (MobUtils.getMobRegionInfo(tableName).getEncodedName().equals(regionName)) { return HFileLink.buildFromHFileLinkPattern(MobUtils.getQualifiedMobRootDir(conf), - HFileArchiveUtil.getArchivePath(conf), path); + HFileArchiveUtil.getArchivePath(conf), path); } return HFileLink.buildFromHFileLinkPattern(inputRoot, inputArchive, path); } @@ -540,8 +533,8 @@ private FileChecksum getFileChecksum(final FileSystem fs, final Path path) { } /** - * Check if the two files are equal by looking at the file length, - * and at the checksum (if user has specified the verifyChecksum flag). + * Check if the two files are equal by looking at the file length, and at the checksum (if user + * has specified the verifyChecksum flag). */ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat) { // Not matching length @@ -562,7 +555,7 @@ private boolean sameFile(final FileStatus inputStat, final FileStatus outputStat } // ========================================================================== - // Input Format + // Input Format // ========================================================================== /** @@ -590,9 +583,7 @@ public void storeFile(final RegionInfo regionInfo, final String family, Path path = HFileLink.createPath(table, region, family, hfile); SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() - .setType(SnapshotFileInfo.Type.HFILE) - .setHfile(path.toString()) - .build(); + .setType(SnapshotFileInfo.Type.HFILE).setHfile(path.toString()).build(); long size; if (storeFile.hasFileSize()) { @@ -603,7 +594,7 @@ public void storeFile(final RegionInfo regionInfo, final String family, files.add(new Pair<>(fileInfo, size)); } } - }); + }); return files; } @@ -612,12 +603,11 @@ public void storeFile(final RegionInfo regionInfo, final String family, * Given a list of file paths and sizes, create around ngroups in as balanced a way as possible. * The groups created will have similar amounts of bytes. *
<p>
    - * The algorithm used is pretty straightforward; the file list is sorted by size, - * and then each group fetch the bigger file available, iterating through groups - * alternating the direction. + * The algorithm used is pretty straightforward; the file list is sorted by size, and then each + * group fetch the bigger file available, iterating through groups alternating the direction. */ - static List>> getBalancedSplits( - final List> files, final int ngroups) { + static List>> + getBalancedSplits(final List> files, final int ngroups) { // Sort files by size, from small to big Collections.sort(files, new Comparator>() { public int compare(Pair a, Pair b) { @@ -674,7 +664,7 @@ private static class ExportSnapshotInputFormat extends InputFormat createRecordReader(InputSplit split, TaskAttemptContext tac) throws IOException, InterruptedException { - return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit)split).getSplitKeys()); + return new ExportSnapshotRecordReader(((ExportSnapshotInputSplit) split).getSplitKeys()); } @Override @@ -694,7 +684,7 @@ public List getSplits(JobContext context) throws IOException, Interr List>> groups = getBalancedSplits(snapshotFiles, mappers); List splits = new ArrayList(groups.size()); - for (List> files: groups) { + for (List> files : groups) { splits.add(new ExportSnapshotInputSplit(files)); } return splits; @@ -710,9 +700,9 @@ public ExportSnapshotInputSplit() { public ExportSnapshotInputSplit(final List> snapshotFiles) { this.files = new ArrayList(snapshotFiles.size()); - for (Pair fileInfo: snapshotFiles) { - this.files.add(new Pair<>( - new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); + for (Pair fileInfo : snapshotFiles) { + this.files.add( + new Pair<>(new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond())); this.length += fileInfo.getSecond(); } } @@ -748,7 +738,7 @@ public void readFields(DataInput in) throws IOException { @Override public void write(DataOutput out) throws IOException { out.writeInt(files.size()); - for (final Pair fileInfo: files) { + for (final Pair fileInfo : files) { fileInfo.getFirst().write(out); out.writeLong(fileInfo.getSecond()); } @@ -764,48 +754,55 @@ private static class ExportSnapshotRecordReader ExportSnapshotRecordReader(final List> files) { this.files = files; - for (Pair fileInfo: files) { + for (Pair fileInfo : files) { totalSize += fileInfo.getSecond(); } } @Override - public void close() { } + public void close() { + } @Override - public BytesWritable getCurrentKey() { return files.get(index).getFirst(); } + public BytesWritable getCurrentKey() { + return files.get(index).getFirst(); + } @Override - public NullWritable getCurrentValue() { return NullWritable.get(); } + public NullWritable getCurrentValue() { + return NullWritable.get(); + } @Override - public float getProgress() { return (float)procSize / totalSize; } + public float getProgress() { + return (float) procSize / totalSize; + } @Override - public void initialize(InputSplit split, TaskAttemptContext tac) { } + public void initialize(InputSplit split, TaskAttemptContext tac) { + } @Override public boolean nextKeyValue() { if (index >= 0) { procSize += files.get(index).getSecond(); } - return(++index < files.size()); + return (++index < files.size()); } } } // ========================================================================== - // Tool + // Tool // ========================================================================== /** * Run Map-Reduce Job to perform the files copy. 
*/ - private void runCopyJob(final Path inputRoot, final Path outputRoot, - final String snapshotName, final Path snapshotDir, final boolean verifyChecksum, - final String filesUser, final String filesGroup, final int filesMode, - final int mappers, final int bandwidthMB) - throws IOException, InterruptedException, ClassNotFoundException { + private void runCopyJob(final Path inputRoot, final Path outputRoot, final String snapshotName, + final Path snapshotDir, final boolean verifyChecksum, final String filesUser, + final String filesGroup, final int filesMode, final int mappers, final int bandwidthMB) + throws IOException, InterruptedException, ClassNotFoundException { Configuration conf = getConf(); if (filesGroup != null) conf.set(CONF_FILES_GROUP, filesGroup); if (filesUser != null) conf.set(CONF_FILES_USER, filesUser); @@ -834,11 +831,9 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, // Acquire the delegation Tokens Configuration srcConf = HBaseConfiguration.createClusterConf(conf, null, CONF_SOURCE_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { inputRoot }, srcConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { inputRoot }, srcConf); Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); - TokenCache.obtainTokensForNamenodes(job.getCredentials(), - new Path[] { outputRoot }, destConf); + TokenCache.obtainTokensForNamenodes(job.getCredentials(), new Path[] { outputRoot }, destConf); // Run the MR Job if (!job.waitForCompletion(true)) { @@ -846,8 +841,8 @@ private void runCopyJob(final Path inputRoot, final Path outputRoot, } } - private void verifySnapshot(final Configuration baseConf, - final FileSystem fs, final Path rootDir, final Path snapshotDir) throws IOException { + private void verifySnapshot(final Configuration baseConf, final FileSystem fs, final Path rootDir, + final Path snapshotDir) throws IOException { // Update the conf with the current root dir, since may be a different cluster Configuration conf = new Configuration(baseConf); CommonFSUtils.setRootDir(conf, rootDir); @@ -956,8 +951,8 @@ public int doWork() throws IOException { } if (outputRoot == null) { - System.err.println("Destination file-system (--" + Options.COPY_TO.getLongOpt() - + ") not provided."); + System.err.println( + "Destination file-system (--" + Options.COPY_TO.getLongOpt() + ") not provided."); LOG.error("Use -h or --help for usage instructions."); return 0; } @@ -977,16 +972,17 @@ public int doWork() throws IOException { Configuration destConf = HBaseConfiguration.createClusterConf(conf, null, CONF_DEST_PREFIX); destConf.setBoolean("fs." 
+ outputRoot.toUri().getScheme() + ".impl.disable.cache", true); FileSystem outputFs = FileSystem.get(outputRoot.toUri(), destConf); - boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) || - conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; + boolean skipTmp = conf.getBoolean(CONF_SKIP_TMP, false) + || conf.get(SnapshotDescriptionUtils.SNAPSHOT_WORKING_DIR) != null; Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, inputRoot); - Path snapshotTmpDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, - destConf); - Path outputSnapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); + Path snapshotTmpDir = + SnapshotDescriptionUtils.getWorkingSnapshotDir(targetName, outputRoot, destConf); + Path outputSnapshotDir = + SnapshotDescriptionUtils.getCompletedSnapshotDir(targetName, outputRoot); Path initialOutputSnapshotDir = skipTmp ? outputSnapshotDir : snapshotTmpDir; LOG.debug("inputFs={}, inputRoot={}", inputFs.getUri().toString(), inputRoot); - LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", - outputFs, outputRoot.toString(), skipTmp, initialOutputSnapshotDir); + LOG.debug("outputFs={}, outputRoot={}, skipTmp={}, initialOutputSnapshotDir={}", outputFs, + outputRoot.toString(), skipTmp, initialOutputSnapshotDir); // Verify snapshot source before copying files if (verifySource) { @@ -1016,8 +1012,8 @@ public int doWork() throws IOException { return 1; } } else { - System.err.println("The snapshot '" + targetName + - "' already exists in the destination: " + outputSnapshotDir); + System.err.println("The snapshot '" + targetName + "' already exists in the destination: " + + outputSnapshotDir); return 1; } } @@ -1027,19 +1023,23 @@ public int doWork() throws IOException { if (outputFs.exists(snapshotTmpDir)) { if (overwrite) { if (!outputFs.delete(snapshotTmpDir, true)) { - System.err.println("Unable to remove existing snapshot tmp directory: "+snapshotTmpDir); + System.err + .println("Unable to remove existing snapshot tmp directory: " + snapshotTmpDir); return 1; } } else { - System.err.println("A snapshot with the same name '"+ targetName +"' may be in-progress"); - System.err.println("Please check "+snapshotTmpDir+". If the snapshot has completed, "); - System.err.println("consider removing "+snapshotTmpDir+" by using the -overwrite option"); + System.err + .println("A snapshot with the same name '" + targetName + "' may be in-progress"); + System.err + .println("Please check " + snapshotTmpDir + ". If the snapshot has completed, "); + System.err + .println("consider removing " + snapshotTmpDir + " by using the -overwrite option"); return 1; } } } - // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ + // Step 1 - Copy fs1:/.snapshot/ to fs2:/.snapshot/.tmp/ // The snapshot references must be copied before the hfiles otherwise the cleaner // will remove them because they are unreferenced. 
List travesedPaths = new ArrayList<>(); @@ -1048,42 +1048,41 @@ public int doWork() throws IOException { LOG.info("Copy Snapshot Manifest from " + snapshotDir + " to " + initialOutputSnapshotDir); travesedPaths = FSUtils.copyFilesParallel(inputFs, snapshotDir, outputFs, initialOutputSnapshotDir, conf, - conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); + conf.getInt(CONF_COPY_MANIFEST_THREADS, DEFAULT_COPY_MANIFEST_THREADS)); copySucceeded = true; } catch (IOException e) { - throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + - snapshotDir + " to=" + initialOutputSnapshotDir, e); + throw new ExportSnapshotException("Failed to copy the snapshot directory: from=" + snapshotDir + + " to=" + initialOutputSnapshotDir, e); } finally { if (copySucceeded) { if (filesUser != null || filesGroup != null) { - LOG.warn((filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " - + filesUser) - + (filesGroup == null ? "" : ", Change the group of " + needSetOwnerDir + " to " - + filesGroup)); + LOG.warn( + (filesUser == null ? "" : "Change the owner of " + needSetOwnerDir + " to " + filesUser) + + (filesGroup == null ? "" + : ", Change the group of " + needSetOwnerDir + " to " + filesGroup)); setOwnerParallel(outputFs, filesUser, filesGroup, conf, travesedPaths); } if (filesMode > 0) { LOG.warn("Change the permission of " + needSetOwnerDir + " to " + filesMode); - setPermissionParallel(outputFs, (short)filesMode, travesedPaths, conf); + setPermissionParallel(outputFs, (short) filesMode, travesedPaths, conf); } } } // Write a new .snapshotinfo if the target name is different from the source name if (!targetName.equals(snapshotName)) { - SnapshotDescription snapshotDesc = - SnapshotDescriptionUtils.readSnapshotInfo(inputFs, snapshotDir) - .toBuilder() - .setName(targetName) - .build(); + SnapshotDescription snapshotDesc = SnapshotDescriptionUtils + .readSnapshotInfo(inputFs, snapshotDir).toBuilder().setName(targetName).build(); SnapshotDescriptionUtils.writeSnapshotInfo(snapshotDesc, initialOutputSnapshotDir, outputFs); if (filesUser != null || filesGroup != null) { - outputFs.setOwner(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, filesGroup); + outputFs.setOwner( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), filesUser, + filesGroup); } if (filesMode > 0) { - outputFs.setPermission(new Path(initialOutputSnapshotDir, - SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), new FsPermission((short)filesMode)); + outputFs.setPermission( + new Path(initialOutputSnapshotDir, SnapshotDescriptionUtils.SNAPSHOTINFO_FILE), + new FsPermission((short) filesMode)); } } @@ -1091,15 +1090,15 @@ public int doWork() throws IOException { // The snapshot references must be copied before the files otherwise the files gets removed // by the HFileArchiver, since they have no references. 
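The copy job started in the try block below distributes files across mappers using getBalancedSplits, whose javadoc was reflowed earlier in this file. A self-contained, hedged sketch of the dealing order that javadoc describes; the sizes, group count, and class name are made up for illustration, and this is not the HBase implementation itself:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class BalancedSplitSketch {
  public static void main(String[] args) {
    long[] sizes = { 900, 500, 400, 300, 200, 100 }; // file sizes, biggest first (made up)
    int ngroups = 2;
    List<List<Long>> groups = new ArrayList<>();
    long[] totals = new long[ngroups];
    for (int i = 0; i < ngroups; i++) {
      groups.add(new ArrayList<>());
    }
    int g = 0, dir = 1;
    for (long size : sizes) {
      groups.get(g).add(size);     // each group takes the biggest file still available
      totals[g] += size;
      g += dir;
      if (g == ngroups || g < 0) { // reached an end of the group list: alternate the direction
        dir = -dir;
        g += dir;
      }
    }
    // Prints [[900, 300, 200], [500, 400, 100]] totals=[1400, 1000] -- similar byte counts per group
    System.out.println(groups + " totals=" + Arrays.toString(totals));
  }
}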
try { - runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, - filesUser, filesGroup, filesMode, mappers, bandwidthMB); + runCopyJob(inputRoot, outputRoot, snapshotName, snapshotDir, verifyChecksum, filesUser, + filesGroup, filesMode, mappers, bandwidthMB); LOG.info("Finalize the Snapshot Export"); if (!skipTmp) { // Step 3 - Rename fs2:/.snapshot/.tmp/ fs2:/.snapshot/ if (!outputFs.rename(snapshotTmpDir, outputSnapshotDir)) { - throw new ExportSnapshotException("Unable to rename snapshot directory from=" + - snapshotTmpDir + " to=" + outputSnapshotDir); + throw new ExportSnapshotException("Unable to rename snapshot directory from=" + + snapshotTmpDir + " to=" + outputSnapshotDir); } } @@ -1127,18 +1126,16 @@ public int doWork() throws IOException { @Override protected void printUsage() { super.printUsage(); - System.out.println("\n" - + "Examples:\n" - + " hbase snapshot export \\\n" + System.out.println("\n" + "Examples:\n" + " hbase snapshot export \\\n" + " --snapshot MySnapshot --copy-to hdfs://srv2:8082/hbase \\\n" - + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" - + "\n" + + " --chuser MyUser --chgroup MyGroup --chmod 700 --mappers 16\n" + "\n" + " hbase snapshot export \\\n" + " --snapshot MySnapshot --copy-from hdfs://srv2:8082/hbase \\\n" + " --copy-to hdfs://srv1:50070/hbase"); } - @Override protected void addOptions() { + @Override + protected void addOptions() { addRequiredOption(Options.SNAPSHOT); addOption(Options.COPY_TO); addOption(Options.COPY_FROM); diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java index 9432f309adb6..c97da439e751 100644 --- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java +++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/util/MapreduceDependencyClasspathTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,8 +27,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Generate a classpath string containing any jars required by mapreduce jobs. Specify - * additional values by providing a comma-separated list of paths via -Dtmpjars. + * Generate a classpath string containing any jars required by mapreduce jobs. Specify additional + * values by providing a comma-separated list of paths via -Dtmpjars. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class MapreduceDependencyClasspathTool implements Tool { @@ -49,8 +49,10 @@ public Configuration getConf() { public int run(String[] args) throws Exception { if (args.length > 0) { System.err.println("Usage: hbase mapredcp [-Dtmpjars=...]"); - System.err.println(" Construct a CLASSPATH containing dependency jars required to run a mapreduce"); - System.err.println(" job. By default, includes any jars detected by TableMapReduceUtils. Provide"); + System.err.println( + " Construct a CLASSPATH containing dependency jars required to run a mapreduce"); + System.err + .println(" job. By default, includes any jars detected by TableMapReduceUtils. 
Provide"); System.err.println(" additional entries by specifying a comma-separated list in tmpjars."); return 0; } @@ -63,7 +65,7 @@ public int run(String[] args) throws Exception { public static void main(String[] argv) throws Exception { // Silence the usual noise. This is probably fragile... Log4jUtils.setLogLevel("org.apache.hadoop.hbase", "WARN"); - System.exit(ToolRunner.run( - HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); + System.exit( + ToolRunner.run(HBaseConfiguration.create(), new MapreduceDependencyClasspathTool(), argv)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java index 417349dcf859..d19ff621d5c6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -114,21 +113,18 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; /** - * Script used evaluating HBase performance and scalability. Runs a HBase - * client that steps through one of a set of hardcoded tests or 'experiments' - * (e.g. a random reads test, a random writes test, etc.). Pass on the - * command-line which test to run and how many clients are participating in - * this experiment. Run {@code PerformanceEvaluation --help} to obtain usage. - * - *
<p>
    This class sets up and runs the evaluation programs described in - * Section 7, Performance Evaluation, of the Bigtable - * paper, pages 8-10. - * - *
<p>
    By default, runs as a mapreduce job where each mapper runs a single test - * client. Can also run as a non-mapreduce, multithreaded application by - * specifying {@code --nomapred}. Each client does about 1GB of data, unless - * specified otherwise. + * Script used evaluating HBase performance and scalability. Runs a HBase client that steps through + * one of a set of hardcoded tests or 'experiments' (e.g. a random reads test, a random writes test, + * etc.). Pass on the command-line which test to run and how many clients are participating in this + * experiment. Run {@code PerformanceEvaluation --help} to obtain usage. + *
<p>
    + * This class sets up and runs the evaluation programs described in Section 7, Performance + * Evaluation, of the Bigtable paper, + * pages 8-10. + *
<p>
    + * By default, runs as a mapreduce job where each mapper runs a single test client. Can also run as + * a non-mapreduce, multithreaded application by specifying {@code --nomapred}. Each client does + * about 1GB of data, unless specified otherwise. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class PerformanceEvaluation extends Configured implements Tool { @@ -167,11 +163,9 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run async sequential read test"); addCommandDescriptor(AsyncSequentialWriteTest.class, "asyncSequentialWrite", "Run async sequential write test"); - addCommandDescriptor(AsyncScanTest.class, "asyncScan", - "Run async scan test (read every row)"); + addCommandDescriptor(AsyncScanTest.class, "asyncScan", "Run async scan test (read every row)"); addCommandDescriptor(RandomReadTest.class, RANDOM_READ, "Run random read test"); - addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", - "Run getRegionLocation test"); + addCommandDescriptor(MetaRandomReadTest.class, "metaRandomRead", "Run getRegionLocation test"); addCommandDescriptor(RandomSeekScanTest.class, RANDOM_SEEK_SCAN, "Run random seek and scan 100 test"); addCommandDescriptor(RandomScanWithRange10Test.class, "scanRange10", @@ -182,18 +176,15 @@ public class PerformanceEvaluation extends Configured implements Tool { "Run random seek scan with both start and stop row (max 1000 rows)"); addCommandDescriptor(RandomScanWithRange10000Test.class, "scanRange10000", "Run random seek scan with both start and stop row (max 10000 rows)"); - addCommandDescriptor(RandomWriteTest.class, "randomWrite", - "Run random write test"); - addCommandDescriptor(SequentialReadTest.class, "sequentialRead", - "Run sequential read test"); - addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", - "Run sequential write test"); + addCommandDescriptor(RandomWriteTest.class, "randomWrite", "Run random write test"); + addCommandDescriptor(SequentialReadTest.class, "sequentialRead", "Run sequential read test"); + addCommandDescriptor(SequentialWriteTest.class, "sequentialWrite", "Run sequential write test"); addCommandDescriptor(MetaWriteTest.class, "metaWrite", "Populate meta table;used with 1 thread; to be cleaned up by cleanMeta"); addCommandDescriptor(ScanTest.class, "scan", "Run scan test (read every row)"); addCommandDescriptor(FilteredScanTest.class, "filterScan", - "Run scan test using a filter to find a specific row based on it's value " + - "(make sure to use --rows=20)"); + "Run scan test using a filter to find a specific row based on it's value " + + "(make sure to use --rows=20)"); addCommandDescriptor(IncrementTest.class, "increment", "Increment on each row; clients overlap on keyspace so some concurrent operations"); addCommandDescriptor(AppendTest.class, "append", @@ -209,8 +200,8 @@ public class PerformanceEvaluation extends Configured implements Tool { } /** - * Enum for map metrics. Keep it out here rather than inside in the Map - * inner-class so we can find associated properties. + * Enum for map metrics. Keep it out here rather than inside in the Map inner-class so we can find + * associated properties. 
*/ protected static enum Counter { /** elapsed time */ @@ -228,7 +219,7 @@ public RunResult(long duration, Histogram hist) { } public RunResult(long duration, long numbOfReplyOverThreshold, long numOfReplyFromReplica, - Histogram hist) { + Histogram hist) { this.duration = duration; this.hist = hist; this.numbOfReplyOverThreshold = numbOfReplyOverThreshold; @@ -245,7 +236,8 @@ public String toString() { return Long.toString(duration); } - @Override public int compareTo(RunResult o) { + @Override + public int compareTo(RunResult o) { return Long.compare(this.duration, o.duration); } } @@ -258,8 +250,8 @@ public PerformanceEvaluation(final Configuration conf) { super(conf); } - protected static void addCommandDescriptor(Class cmdClass, - String name, String description) { + protected static void addCommandDescriptor(Class cmdClass, String name, + String description) { CmdDescriptor cmdDescriptor = new CmdDescriptor(cmdClass, name, description); COMMANDS.put(name, cmdDescriptor); } @@ -314,12 +306,12 @@ private Class forName(String className, Class type) @Override protected void map(LongWritable key, Text value, final Context context) - throws IOException, InterruptedException { + throws IOException, InterruptedException { Status status = new Status() { @Override public void setStatus(String msg) { - context.setStatus(msg); + context.setStatus(msg); } }; @@ -334,7 +326,8 @@ public void setStatus(String msg) { } // Evaluation task - RunResult result = PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); + RunResult result = + PerformanceEvaluation.runOneClient(this.cmd, conf, con, asyncCon, opts, status); // Collect how much time the thing took. Report as map output and // to the ELAPSED_TIME counter. context.getCounter(Counter.ELAPSED_TIME).increment(result.duration); @@ -345,43 +338,37 @@ public void setStatus(String msg) { } /* - * If table does not already exist, create. Also create a table when - * {@code opts.presplitRegions} is specified or when the existing table's - * region replica count doesn't match {@code opts.replicas}. + * If table does not already exist, create. Also create a table when {@code opts.presplitRegions} + * is specified or when the existing table's region replica count doesn't match {@code + * opts.replicas}. */ static boolean checkTable(Admin admin, TestOptions opts) throws IOException { TableName tableName = TableName.valueOf(opts.tableName); boolean needsDelete = false, exists = admin.tableExists(tableName); boolean isReadCmd = opts.cmdName.toLowerCase(Locale.ROOT).contains("read") - || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan"); + || opts.cmdName.toLowerCase(Locale.ROOT).contains("scan"); if (!exists && isReadCmd) { throw new IllegalStateException( - "Must specify an existing table for read commands. Run a write command first."); + "Must specify an existing table for read commands. Run a write command first."); } HTableDescriptor desc = - exists ? admin.getTableDescriptor(TableName.valueOf(opts.tableName)) : null; + exists ? admin.getTableDescriptor(TableName.valueOf(opts.tableName)) : null; byte[][] splits = getSplits(opts); // recreate the table when user has requested presplit or when existing // {RegionSplitPolicy,replica count} does not match requested, or when the // number of column families does not match requested. 
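The reflowed class javadoc above says the evaluation runs either as a MapReduce job or, with --nomapred, as a local multithreaded client. A minimal, hedged sketch of launching it through ToolRunner; the command name, row count, and client count below are placeholders picked from the descriptors registered above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.PerformanceEvaluation;
import org.apache.hadoop.util.ToolRunner;

public class PerformanceEvaluationLauncher {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly: hbase pe --nomapred --rows=1000 randomWrite 1
    int rc = ToolRunner.run(conf, new PerformanceEvaluation(conf),
      new String[] { "--nomapred", "--rows=1000", "randomWrite", "1" });
    System.exit(rc);
  }
}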
if ((exists && opts.presplitRegions != DEFAULT_OPTS.presplitRegions) - || (!isReadCmd && desc != null && - !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) - || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) - || (desc != null && desc.getColumnFamilyCount() != opts.families)) { + || (!isReadCmd && desc != null + && !StringUtils.equals(desc.getRegionSplitPolicyClassName(), opts.splitPolicy)) + || (!isReadCmd && desc != null && desc.getRegionReplication() != opts.replicas) + || (desc != null && desc.getColumnFamilyCount() != opts.families)) { needsDelete = true; // wait, why did it delete my table?!? - LOG.debug(MoreObjects.toStringHelper("needsDelete") - .add("needsDelete", needsDelete) - .add("isReadCmd", isReadCmd) - .add("exists", exists) - .add("desc", desc) - .add("presplit", opts.presplitRegions) - .add("splitPolicy", opts.splitPolicy) - .add("replicas", opts.replicas) - .add("families", opts.families) - .toString()); + LOG.debug(MoreObjects.toStringHelper("needsDelete").add("needsDelete", needsDelete) + .add("isReadCmd", isReadCmd).add("exists", exists).add("desc", desc) + .add("presplit", opts.presplitRegions).add("splitPolicy", opts.splitPolicy) + .add("replicas", opts.replicas).add("families", opts.families).toString()); } // remove an existing table @@ -439,8 +426,7 @@ protected static HTableDescriptor getTableDescriptor(TestOptions opts) { * generates splits based on total number of rows and specified split regions */ protected static byte[][] getSplits(TestOptions opts) { - if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) - return null; + if (opts.presplitRegions == DEFAULT_OPTS.presplitRegions) return null; int numSplitPoints = opts.presplitRegions - 1; byte[][] splits = new byte[numSplitPoints][]; @@ -482,8 +468,8 @@ static RunResult[] doLocalClients(final TestOptions opts, final Configuration co cons[i] = ConnectionFactory.createConnection(conf); asyncCons[i] = ConnectionFactory.createAsyncConnection(conf).get(); } - LOG.info("Created " + opts.connCount + " connections for " + - opts.numClientThreads + " threads"); + LOG.info( + "Created " + opts.connCount + " connections for " + opts.numClientThreads + " threads"); for (int i = 0; i < threads.length; i++) { final int index = i; threads[i] = pool.submit(new Callable() { @@ -499,11 +485,11 @@ public void setStatus(final String msg) throws IOException { LOG.info(msg); } }); - LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + - "ms over " + threadOpts.perClientRunRows + " rows"); + LOG.info("Finished " + Thread.currentThread().getName() + " in " + run.duration + + "ms over " + threadOpts.perClientRunRows + " rows"); if (opts.latencyThreshold > 0) { - LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + - "(ms) is " + run.numbOfReplyOverThreshold); + LOG.info("Number of replies over latency threshold " + opts.latencyThreshold + + "(ms) is " + run.numbOfReplyOverThreshold); } return run; } @@ -519,11 +505,10 @@ public void setStatus(final String msg) throws IOException { } } final String test = cmd.getSimpleName(); - LOG.info("[" + test + "] Summary of timings (ms): " - + Arrays.toString(results)); + LOG.info("[" + test + "] Summary of timings (ms): " + Arrays.toString(results)); Arrays.sort(results); long total = 0; - float avgLatency = 0 ; + float avgLatency = 0; float avgTPS = 0; long replicaWins = 0; for (RunResult result : results) { @@ -534,10 +519,8 @@ public void setStatus(final String msg) throws 
IOException { } avgTPS *= 1000; // ms to second avgLatency = avgLatency / results.length; - LOG.info("[" + test + " duration ]" - + "\tMin: " + results[0] + "ms" - + "\tMax: " + results[results.length - 1] + "ms" - + "\tAvg: " + (total / results.length) + "ms"); + LOG.info("[" + test + " duration ]" + "\tMin: " + results[0] + "ms" + "\tMax: " + + results[results.length - 1] + "ms" + "\tAvg: " + (total / results.length) + "ms"); LOG.info("[ Avg latency (us)]\t" + Math.round(avgLatency)); LOG.info("[ Avg TPS/QPS]\t" + Math.round(avgTPS) + "\t row per second"); if (opts.replicas > 1) { @@ -553,9 +536,8 @@ public void setStatus(final String msg) throws IOException { } /* - * Run a mapreduce job. Run as many maps as asked-for clients. - * Before we start up the job, write out an input file with instruction - * per client regards which row they are to start on. + * Run a mapreduce job. Run as many maps as asked-for clients. Before we start up the job, write + * out an input file with instruction per client regards which row they are to start on. * @param cmd Command to run. * @throws IOException */ @@ -587,11 +569,11 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) TextOutputFormat.setOutputPath(job, new Path(inputDir.getParent(), "outputs")); TableMapReduceUtil.addDependencyJars(job); - TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - Histogram.class, // yammer metrics - Gson.class, // gson + TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), Histogram.class, // yammer + // metrics + Gson.class, // gson FilterAllFilter.class // hbase-server tests jar - ); + ); TableMapReduceUtil.initCredentials(job); @@ -600,7 +582,7 @@ static Job doMapReduce(TestOptions opts, final Configuration conf) } /** - * Each client has one mapper to do the work, and client do the resulting count in a map task. + * Each client has one mapper to do the work, and client do the resulting count in a map task. */ static String JOB_INPUT_FILENAME = "input.txt"; @@ -616,7 +598,7 @@ static Path writeInputFile(final Configuration c, final TestOptions opts) throws } static Path writeInputFile(final Configuration c, final TestOptions opts, final Path basedir) - throws IOException { + throws IOException { SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmmss"); Path jobdir = new Path(new Path(basedir, PERF_EVAL_DIR), formatter.format(new Date())); Path inputDir = new Path(jobdir, "inputs"); @@ -641,7 +623,7 @@ static Path writeInputFile(final Configuration c, final TestOptions opts, final int hash = h.hash(new ByteArrayHashKey(b, 0, b.length), -1); m.put(hash, s); } - for (Map.Entry e: m.entrySet()) { + for (Map.Entry e : m.entrySet()) { out.println(e.getValue()); } } finally { @@ -678,11 +660,11 @@ public String getDescription() { } /** - * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. - * This makes tracking all these arguments a little easier. - * NOTE: ADDING AN OPTION, you need to add a data member, a getter/setter (to make JSON - * serialization of this TestOptions class behave), and you need to add to the clone constructor - * below copying your new option from the 'that' to the 'this'. Look for 'clone' below. + * Wraps up options passed to {@link org.apache.hadoop.hbase.PerformanceEvaluation}. This makes + * tracking all these arguments a little easier. 
NOTE: ADDING AN OPTION, you need to add a data + * member, a getter/setter (to make JSON serialization of this TestOptions class behave), and you + * need to add to the clone constructor below copying your new option from the 'that' to the + * 'this'. Look for 'clone' below. */ static class TestOptions { String cmdName = null; @@ -705,7 +687,7 @@ static class TestOptions { boolean writeToWAL = true; boolean autoFlush = false; boolean oneCon = false; - int connCount = -1; //wil decide the actual num later + int connCount = -1; // wil decide the actual num later boolean useTags = false; int noOfTags = 1; boolean reportLatency = false; @@ -723,7 +705,7 @@ static class TestOptions { boolean valueRandom = false; boolean valueZipf = false; int valueSize = DEFAULT_VALUE_LENGTH; - int period = (this.perClientRunRows / 10) == 0? perClientRunRows: perClientRunRows / 10; + int period = (this.perClientRunRows / 10) == 0 ? perClientRunRows : perClientRunRows / 10; int cycles = 1; int columns = 1; int families = 1; @@ -731,14 +713,14 @@ static class TestOptions { int latencyThreshold = 0; // in millsecond boolean addColumns = true; MemoryCompactionPolicy inMemoryCompaction = - MemoryCompactionPolicy.valueOf( - CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); + MemoryCompactionPolicy.valueOf(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT); boolean asyncPrefetch = false; boolean cacheBlocks = true; Scan.ReadType scanReadType = Scan.ReadType.DEFAULT; long bufferSize = 2l * 1024l * 1024l; - public TestOptions() {} + public TestOptions() { + } /** * Clone constructor. @@ -1131,8 +1113,7 @@ public long getBufferSize() { } /* - * A test. - * Subclass to particularize what happens per row. + * A test. Subclass to particularize what happens per row. */ static abstract class TestBase { // Below is make it so when Tests are all running in the one @@ -1142,6 +1123,7 @@ static abstract class TestBase { private static long nextRandomSeed() { return randomSeed.nextLong(); } + private final int everyN; protected final Random rand = new Random(nextRandomSeed()); @@ -1165,8 +1147,8 @@ private static long nextRandomSeed() { private long numOfReplyFromReplica = 0; /** - * Note that all subclasses of this class must provide a public constructor - * that has the exact same list of arguments. + * Note that all subclasses of this class must provide a public constructor that has the exact + * same list of arguments. 
*/ TestBase(final Configuration conf, final TestOptions options, final Status status) { this.conf = conf; @@ -1190,13 +1172,14 @@ int getValueLength(final Random r) { } } - void updateValueSize(final Result [] rs) throws IOException { + void updateValueSize(final Result[] rs) throws IOException { updateValueSize(rs, 0); } - void updateValueSize(final Result [] rs, final long latency) throws IOException { + void updateValueSize(final Result[] rs, final long latency) throws IOException { if (rs == null || (latency == 0)) return; - for (Result r: rs) updateValueSize(r, latency); + for (Result r : rs) + updateValueSize(r, latency); } void updateValueSize(final Result r) throws IOException { @@ -1209,7 +1192,7 @@ void updateValueSize(final Result r, final long latency) throws IOException { // update replicaHistogram if (r.isStale()) { replicaLatencyHistogram.update(latency / 1000); - numOfReplyFromReplica ++; + numOfReplyFromReplica++; } if (!isRandomValueSize()) return; @@ -1226,7 +1209,7 @@ void updateValueSize(final int valueSize) { void updateScanMetrics(final ScanMetrics metrics) { if (metrics == null) return; - Map metricsMap = metrics.getMetricsMap(); + Map metricsMap = metrics.getMetricsMap(); Long rpcCalls = metricsMap.get(ScanMetrics.RPC_CALLS_METRIC_NAME); if (rpcCalls != null) { this.rpcCallsHistogram.update(rpcCalls.longValue()); @@ -1254,8 +1237,8 @@ void updateScanMetrics(final ScanMetrics metrics) { } String generateStatus(final int sr, final int i, final int lr) { - return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + - (!isRandomValueSize()? "": ", value size " + getShortValueSizeReport()); + return sr + "/" + i + "/" + lr + ", latency " + getShortLatencyReport() + + (!isRandomValueSize() ? "" : ", value size " + getShortValueSizeReport()); } boolean isRandomValueSize() { @@ -1278,16 +1261,19 @@ void testSetup() throws IOException { latencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // If it is a replica test, set up histogram for replica. if (opts.replicas > 1) { - replicaLatencyHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + replicaLatencyHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); } valueSizeHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); // scan metrics rpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); remoteRpcCallsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - millisBetweenNextHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + millisBetweenNextHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); regionsScannedHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); bytesInResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); - bytesInRemoteResultsHistogram = YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); + bytesInRemoteResultsHistogram = + YammerHistogramUtils.newHistogram(new UniformReservoir(1024 * 500)); onStartup(); } @@ -1301,52 +1287,51 @@ void testTakedown() throws IOException { // output. We can't use 'this' here because each thread has its own instance of Test class. 
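testTakedown() below reports the histograms that testSetup() builds through YammerHistogramUtils with a UniformReservoir. A hedged sketch of the underlying dropwizard (yammer) metrics calls, assuming the com.codahale.metrics classes that Histogram and UniformReservoir come from; the sample values are placeholders:

import com.codahale.metrics.Histogram;
import com.codahale.metrics.UniformReservoir;

public class HistogramSketch {
  public static void main(String[] args) {
    // Same reservoir size the tests use above; the samples here are placeholders.
    Histogram latencyHistogram = new Histogram(new UniformReservoir(1024 * 500));
    latencyHistogram.update(120);  // record one latency sample, in microseconds
    latencyHistogram.update(250);
    System.out.println("count=" + latencyHistogram.getCount()
      + " p99(us)=" + latencyHistogram.getSnapshot().get99thPercentile());
  }
}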
synchronized (Test.class) { status.setStatus("Test : " + testName + ", Thread : " + Thread.currentThread().getName()); - status.setStatus("Latency (us) : " + YammerHistogramUtils.getHistogramReport( - latencyHistogram)); + status.setStatus( + "Latency (us) : " + YammerHistogramUtils.getHistogramReport(latencyHistogram)); if (opts.replicas > 1) { - status.setStatus("Latency (us) from Replica Regions: " + - YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); + status.setStatus("Latency (us) from Replica Regions: " + + YammerHistogramUtils.getHistogramReport(replicaLatencyHistogram)); } status.setStatus("Num measures (latency) : " + latencyHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(latencyHistogram)); if (valueSizeHistogram.getCount() > 0) { - status.setStatus("ValueSize (bytes) : " - + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); + status.setStatus( + "ValueSize (bytes) : " + YammerHistogramUtils.getHistogramReport(valueSizeHistogram)); status.setStatus("Num measures (ValueSize): " + valueSizeHistogram.getCount()); status.setStatus(YammerHistogramUtils.getPrettyHistogramReport(valueSizeHistogram)); } else { status.setStatus("No valueSize statistics available"); } if (rpcCallsHistogram.getCount() > 0) { - status.setStatus("rpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); + status.setStatus( + "rpcCalls (count): " + YammerHistogramUtils.getHistogramReport(rpcCallsHistogram)); } if (remoteRpcCallsHistogram.getCount() > 0) { - status.setStatus("remoteRpcCalls (count): " + - YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); + status.setStatus("remoteRpcCalls (count): " + + YammerHistogramUtils.getHistogramReport(remoteRpcCallsHistogram)); } if (millisBetweenNextHistogram.getCount() > 0) { - status.setStatus("millisBetweenNext (latency): " + - YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); + status.setStatus("millisBetweenNext (latency): " + + YammerHistogramUtils.getHistogramReport(millisBetweenNextHistogram)); } if (regionsScannedHistogram.getCount() > 0) { - status.setStatus("regionsScanned (count): " + - YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); + status.setStatus("regionsScanned (count): " + + YammerHistogramUtils.getHistogramReport(regionsScannedHistogram)); } if (bytesInResultsHistogram.getCount() > 0) { - status.setStatus("bytesInResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); + status.setStatus("bytesInResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInResultsHistogram)); } if (bytesInRemoteResultsHistogram.getCount() > 0) { - status.setStatus("bytesInRemoteResults (size): " + - YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); + status.setStatus("bytesInRemoteResults (size): " + + YammerHistogramUtils.getHistogramReport(bytesInRemoteResultsHistogram)); } } } abstract void onTakedown() throws IOException; - /* * Run test * @return Elapsed time. 
@@ -1386,12 +1371,12 @@ void testTimed() throws IOException, InterruptedException { long startTime = System.nanoTime(); boolean requestSent = false; Span span = TraceUtil.getGlobalTracer().spanBuilder("test row").startSpan(); - try (Scope scope = span.makeCurrent()){ + try (Scope scope = span.makeCurrent()) { requestSent = testRow(i, startTime); } finally { span.end(); } - if ( (i - startRow) > opts.measureAfter) { + if ((i - startRow) > opts.measureAfter) { // If multiget or multiput is enabled, say set to 10, testRow() returns immediately // first 9 times and sends the actual get request in the 10th iteration. // We should only set latency when actual request is sent because otherwise @@ -1400,7 +1385,7 @@ void testTimed() throws IOException, InterruptedException { long latency = (System.nanoTime() - startTime) / 1000; latencyHistogram.update(latency); if ((opts.latencyThreshold > 0) && (latency / 1000 >= opts.latencyThreshold)) { - numOfReplyOverLatencyThreshold ++; + numOfReplyOverLatencyThreshold++; } } if (status != null && i > 0 && (i % getReportingPeriod()) == 0) { @@ -1425,15 +1410,14 @@ public String getShortValueSizeReport() { return YammerHistogramUtils.getShortHistogramReport(this.valueSizeHistogram); } - /** * Test for individual row. * @param i Row index. - * @return true if the row was sent to server and need to record metrics. - * False if not, multiGet and multiPut e.g., the rows are sent - * to server only if enough gets/puts are gathered. + * @return true if the row was sent to server and need to record metrics. False if not, multiGet + * and multiPut e.g., the rows are sent to server only if enough gets/puts are gathered. */ - abstract boolean testRow(final int i, final long startTime) throws IOException, InterruptedException; + abstract boolean testRow(final int i, final long startTime) + throws IOException, InterruptedException; } static abstract class Test extends TestBase { @@ -1473,7 +1457,7 @@ void onTakedown() throws IOException { } /* - Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest + * Parent class for all meta tests: MetaWriteTest, MetaRandomReadTest and CleanMetaTest */ static abstract class MetaTest extends TableTest { protected int keyLength; @@ -1489,7 +1473,7 @@ void onTakedown() throws IOException { } /* - Generates Lexicographically ascending strings + * Generates Lexicographically ascending strings */ protected byte[] getSplitKey(final int i) { return Bytes.toBytes(String.format("%0" + keyLength + "d", i)); @@ -1537,7 +1521,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1553,8 +1537,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.multiGet > 0) { this.gets.add(get); if (this.gets.size() == opts.multiGet) { - Result[] rs = - this.table.get(this.gets).stream().map(f -> propagate(f::get)).toArray(Result[]::new); + Result[] rs = this.table.get(this.gets).stream().map(f -> propagate(f::get)) + .toArray(Result[]::new); updateValueSize(rs); this.gets.clear(); } else { @@ -1622,9 +1606,8 @@ static class AsyncScanTest extends AsyncTableTest { @Override void onStartup() throws IOException { - this.asyncTable = - connection.getTable(TableName.valueOf(opts.tableName), - Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); + this.asyncTable = connection.getTable(TableName.valueOf(opts.tableName), + Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())); } @Override @@ -1639,15 +1622,14 @@ void testTakedown() throws IOException { @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { - Scan scan = - new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) - .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) - .setReadType(opts.scanReadType).setScanMetricsEnabled(true); + Scan scan = new Scan().withStartRow(format(opts.startRow)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1677,7 +1659,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1719,7 +1701,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -1728,8 +1710,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -1789,16 +1771,16 @@ static class RandomSeekScanTest extends TableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - Scan scan = new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)) - .setCaching(opts.caching).setCacheBlocks(opts.cacheBlocks) - .setAsyncPrefetch(opts.asyncPrefetch).setReadType(opts.scanReadType) - .setScanMetricsEnabled(true); + Scan scan = + new Scan().withStartRow(getRandomRow(this.rand, opts.totalRows)).setCaching(opts.caching) + .setCacheBlocks(opts.cacheBlocks).setAsyncPrefetch(opts.asyncPrefetch) + .setReadType(opts.scanReadType).setScanMetricsEnabled(true); FilterList list = new FilterList(); for (int family = 0; family < opts.families; family++) { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1846,7 +1828,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -1866,8 +1848,8 @@ boolean testRow(final int i, final long startTime) throws IOException { } if (i % 100 == 0) { LOG.info(String.format("Scan for key range %s - %s returned %s rows", - Bytes.toString(startAndStopRow.getFirst()), - Bytes.toString(startAndStopRow.getSecond()), count)); + Bytes.toString(startAndStopRow.getFirst()), Bytes.toString(startAndStopRow.getSecond()), + count)); } } finally { updateScanMetrics(s.getScanMetrics()); @@ -1876,7 +1858,7 @@ boolean testRow(final int i, final long startTime) throws IOException { return true; } - protected abstract Pair getStartAndStopRow(); + protected abstract Pair getStartAndStopRow(); protected Pair generateStartAndStopRows(int maxRange) { int start = this.rand.nextInt(Integer.MAX_VALUE) % opts.totalRows; @@ -1887,7 +1869,7 @@ protected Pair generateStartAndStopRows(int maxRange) { @Override protected int getReportingPeriod() { int period = opts.perClientRunRows / 100; - return period == 0? opts.perClientRunRows: period; + return period == 0 ? opts.perClientRunRows : period; } } @@ -1958,7 +1940,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? 
COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -1973,7 +1955,7 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.multiGet > 0) { this.gets.add(get); if (this.gets.size() == opts.multiGet) { - Result [] rs = this.table.get(this.gets); + Result[] rs = this.table.get(this.gets); if (opts.replicas > 1) { long latency = System.nanoTime() - startTime; updateValueSize(rs, latency); @@ -2013,7 +1995,7 @@ protected void testTakedown() throws IOException { } /* - Send random reads against fake regions inserted by MetaWriteTest + * Send random reads against fake regions inserted by MetaWriteTest */ static class MetaRandomReadTest extends MetaTest { private Random rd = new Random(); @@ -2035,8 +2017,8 @@ boolean testRow(final int i, final long startTime) throws IOException, Interrupt if (opts.randomSleep > 0) { Thread.sleep(rd.nextInt(opts.randomSleep)); } - HRegionLocation hRegionLocation = regionLocator.getRegionLocation( - getSplitKey(rd.nextInt(opts.perClientRunRows)), true); + HRegionLocation hRegionLocation = + regionLocator.getRegionLocation(getSplitKey(rd.nextInt(opts.perClientRunRows)), true); LOG.debug("get location for region: " + hRegionLocation); return true; } @@ -2063,7 +2045,6 @@ protected byte[] generateRow(final int i) { return getRandomRow(this.rand, opts.totalRows); } - } static class ScanTest extends TableTest { @@ -2081,7 +2062,6 @@ void testTakedown() throws IOException { super.testTakedown(); } - @Override boolean testRow(final int i, final long startTime) throws IOException { if (this.testScanner == null) { @@ -2092,7 +2072,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(familyName, qualifier); } } else { @@ -2113,19 +2093,20 @@ boolean testRow(final int i, final long startTime) throws IOException { /** * Base class for operations that are CAS-like; that read a value and then set it based off what * they read. In this category is increment, append, checkAndPut, etc. - * - *
<p>These operations also want some concurrency going on. Usually when these tests run, they + * <p>
    + * These operations also want some concurrency going on. Usually when these tests run, they * operate in their own part of the key range. In CASTest, we will have them all overlap on the * same key space. We do this with our getStartRow and getLastRow overrides. */ static abstract class CASTableTest extends TableTest { - private final byte [] qualifier; + private final byte[] qualifier; + CASTableTest(Connection con, TestOptions options, Status status) { super(con, options, status); qualifier = Bytes.toBytes(this.getClass().getSimpleName()); } - byte [] getQualifier() { + byte[] getQualifier() { return this.qualifier; } @@ -2167,7 +2148,7 @@ static class AppendTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - byte [] bytes = format(i); + byte[] bytes = format(i); Append append = new Append(bytes); // unlike checkAndXXX tests, which make most sense to do on a single value, // if multiple families are specified for an append test we assume it is @@ -2188,7 +2169,7 @@ static class CheckAndMutateTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2196,8 +2177,8 @@ boolean testRow(final int i, final long startTime) throws IOException { this.table.put(put); RowMutations mutations = new RowMutations(bytes); mutations.add(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenMutate(mutations); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenMutate(mutations); return true; } } @@ -2209,14 +2190,14 @@ static class CheckAndPutTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); put.addColumn(FAMILY_ZERO, getQualifier(), bytes); this.table.put(put); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenPut(put); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenPut(put); return true; } } @@ -2228,7 +2209,7 @@ static class CheckAndDeleteTest extends CASTableTest { @Override boolean testRow(final int i, final long startTime) throws IOException { - final byte [] bytes = format(i); + final byte[] bytes = format(i); // checkAndXXX tests operate on only a single value // Put a known value so when we go to check it, it is there. Put put = new Put(bytes); @@ -2236,14 +2217,14 @@ boolean testRow(final int i, final long startTime) throws IOException { this.table.put(put); Delete delete = new Delete(put.getRow()); delete.addColumn(FAMILY_ZERO, getQualifier()); - this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()) - .ifEquals(bytes).thenDelete(delete); + this.table.checkAndMutate(bytes, FAMILY_ZERO).qualifier(getQualifier()).ifEquals(bytes) + .thenDelete(delete); return true; } } /* - Delete all fake regions inserted to meta table by MetaWriteTest. + * Delete all fake regions inserted to meta table by MetaWriteTest. 
*/ static class CleanMetaTest extends MetaTest { CleanMetaTest(Connection con, TestOptions options, Status status) { @@ -2254,11 +2235,11 @@ static class CleanMetaTest extends MetaTest { boolean testRow(final int i, final long startTime) throws IOException { try { RegionInfo regionInfo = connection.getRegionLocator(table.getName()) - .getRegionLocation(getSplitKey(i), false).getRegion(); + .getRegionLocation(getSplitKey(i), false).getRegion(); LOG.debug("deleting region from meta: " + regionInfo); - Delete delete = MetaTableAccessor - .makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); + Delete delete = + MetaTableAccessor.makeDeleteFromRegionInfo(regionInfo, HConstants.LATEST_TIMESTAMP); try (Table t = MetaTableAccessor.getMetaHTable(connection)) { t.delete(delete); } @@ -2282,7 +2263,7 @@ boolean testRow(final int i, final long startTime) throws IOException { byte[] familyName = Bytes.toBytes(FAMILY_NAME_BASE + family); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); get.addColumn(familyName, qualifier); } } else { @@ -2300,7 +2281,6 @@ boolean testRow(final int i, final long startTime) throws IOException { static class SequentialWriteTest extends BufferedMutatorTest { private ArrayList puts; - SequentialWriteTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.multiPut > 0) { @@ -2320,7 +2300,7 @@ boolean testRow(final int i, final long startTime) throws IOException { for (int family = 0; family < opts.families; family++) { byte familyName[] = Bytes.toBytes(FAMILY_NAME_BASE + family); for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); byte[] value = generateData(this.rand, getValueLength(this.rand)); if (opts.useTags) { byte[] tag = generateData(this.rand, TAG_LENGTH); @@ -2329,8 +2309,8 @@ boolean testRow(final int i, final long startTime) throws IOException { Tag t = new ArrayBackedTag((byte) n, tag); tags[n] = t; } - KeyValue kv = new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, - value, tags); + KeyValue kv = + new KeyValue(row, familyName, qualifier, HConstants.LATEST_TIMESTAMP, value, tags); put.add(kv); updateValueSize(kv.getValueLength()); } else { @@ -2360,7 +2340,7 @@ boolean testRow(final int i, final long startTime) throws IOException { } /* - Insert fake regions into meta table with contiguous split keys. + * Insert fake regions into meta table with contiguous split keys. 
*/ static class MetaWriteTest extends MetaTest { @@ -2372,27 +2352,26 @@ static class MetaWriteTest extends MetaTest { boolean testRow(final int i, final long startTime) throws IOException { List regionInfos = new ArrayList(); RegionInfo regionInfo = (RegionInfoBuilder.newBuilder(TableName.valueOf(TABLE_NAME)) - .setStartKey(getSplitKey(i)) - .setEndKey(getSplitKey(i + 1)) - .build()); + .setStartKey(getSplitKey(i)).setEndKey(getSplitKey(i + 1)).build()); regionInfos.add(regionInfo); MetaTableAccessor.addRegionsToMeta(connection, regionInfos, 1); // write the serverName columns - MetaTableAccessor.updateRegionLocation(connection, - regionInfo, ServerName.valueOf("localhost", 60010, rand.nextLong()), i, + MetaTableAccessor.updateRegionLocation(connection, regionInfo, + ServerName.valueOf("localhost", 60010, rand.nextLong()), i, EnvironmentEdgeManager.currentTime()); return true; } } + static class FilteredScanTest extends TableTest { protected static final Logger LOG = LoggerFactory.getLogger(FilteredScanTest.class.getName()); FilteredScanTest(Connection con, TestOptions options, Status status) { super(con, options, status); if (opts.perClientRunRows == DEFAULT_ROWS_PER_GB) { - LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + - ". This could take a very long time."); + LOG.warn("Option \"rows\" unspecified. Using default value " + DEFAULT_ROWS_PER_GB + + ". This could take a very long time."); } } @@ -2417,8 +2396,8 @@ boolean testRow(int i, final long startTime) throws IOException { protected Scan constructScan(byte[] valuePrefix) throws IOException { FilterList list = new FilterList(); - Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, - CompareOperator.EQUAL, new BinaryComparator(valuePrefix)); + Filter filter = new SingleColumnValueFilter(FAMILY_ZERO, COLUMN_ZERO, CompareOperator.EQUAL, + new BinaryComparator(valuePrefix)); list.addFilter(filter); if (opts.filterAll) { list.addFilter(new FilterAllFilter()); @@ -2428,7 +2407,7 @@ protected Scan constructScan(byte[] valuePrefix) throws IOException { .setScanMetricsEnabled(true); if (opts.addColumns) { for (int column = 0; column < opts.columns; column++) { - byte [] qualifier = column == 0? COLUMN_ZERO: Bytes.toBytes("" + column); + byte[] qualifier = column == 0 ? COLUMN_ZERO : Bytes.toBytes("" + column); scan.addColumn(FAMILY_ZERO, qualifier); } } else { @@ -2445,60 +2424,61 @@ protected Scan constructScan(byte[] valuePrefix) throws IOException { * @param timeMs Time taken in milliseconds. * @return String value with label, ie '123.76 MB/s' */ - private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, int columns) { - BigDecimal rowSize = BigDecimal.valueOf(ROW_LENGTH + - ((valueSize + (FAMILY_NAME_BASE.length()+1) + COLUMN_ZERO.length) * columns) * families); - BigDecimal mbps = BigDecimal.valueOf(rows).multiply(rowSize, CXT) - .divide(BigDecimal.valueOf(timeMs), CXT).multiply(MS_PER_SEC, CXT) - .divide(BYTES_PER_MB, CXT); + private static String calculateMbps(int rows, long timeMs, final int valueSize, int families, + int columns) { + BigDecimal rowSize = BigDecimal.valueOf( + ROW_LENGTH + ((valueSize + (FAMILY_NAME_BASE.length() + 1) + COLUMN_ZERO.length) * columns) + * families); + BigDecimal mbps = + BigDecimal.valueOf(rows).multiply(rowSize, CXT).divide(BigDecimal.valueOf(timeMs), CXT) + .multiply(MS_PER_SEC, CXT).divide(BYTES_PER_MB, CXT); return FMT.format(mbps) + " MB/s"; } /* * Format passed integer. 
* @param number - * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed - * number (Does absolute in case number is negative). + * @return Returns zero-prefixed ROW_LENGTH-byte wide decimal version of passed number (Does + * absolute in case number is negative). */ - public static byte [] format(final int number) { - byte [] b = new byte[ROW_LENGTH]; + public static byte[] format(final int number) { + byte[] b = new byte[ROW_LENGTH]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { - b[i] = (byte)((d % 10) + '0'); + b[i] = (byte) ((d % 10) + '0'); d /= 10; } return b; } /* - * This method takes some time and is done inline uploading data. For - * example, doing the mapfile test, generation of the key and value - * consumes about 30% of CPU time. + * This method takes some time and is done inline uploading data. For example, doing the mapfile + * test, generation of the key and value consumes about 30% of CPU time. * @return Generated random value to insert into a table cell. */ public static byte[] generateData(final Random r, int length) { - byte [] b = new byte [length]; + byte[] b = new byte[length]; int i; - for(i = 0; i < (length-8); i += 8) { + for (i = 0; i < (length - 8); i += 8) { b[i] = (byte) (65 + r.nextInt(26)); - b[i+1] = b[i]; - b[i+2] = b[i]; - b[i+3] = b[i]; - b[i+4] = b[i]; - b[i+5] = b[i]; - b[i+6] = b[i]; - b[i+7] = b[i]; + b[i + 1] = b[i]; + b[i + 2] = b[i]; + b[i + 3] = b[i]; + b[i + 4] = b[i]; + b[i + 5] = b[i]; + b[i + 6] = b[i]; + b[i + 7] = b[i]; } byte a = (byte) (65 + r.nextInt(26)); - for(; i < length; i++) { + for (; i < length; i++) { b[i] = a; } return b; } - static byte [] getRandomRow(final Random random, final int totalRows) { + static byte[] getRandomRow(final Random random, final int totalRows) { return format(generateRandomRow(random, totalRows)); } @@ -2509,8 +2489,8 @@ static int generateRandomRow(final Random random, final int totalRows) { static RunResult runOneClient(final Class cmd, Configuration conf, Connection con, AsyncConnection asyncCon, TestOptions opts, final Status status) throws IOException, InterruptedException { - status.setStatus("Start " + cmd + " at offset " + opts.startRow + " for " - + opts.perClientRunRows + " rows"); + status.setStatus( + "Start " + cmd + " at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows"); long totalElapsedTime; final TestBase t; @@ -2536,21 +2516,22 @@ static RunResult runOneClient(final Class cmd, Configuration } totalElapsedTime = t.test(); - status.setStatus("Finished " + cmd + " in " + totalElapsedTime + - "ms at offset " + opts.startRow + " for " + opts.perClientRunRows + " rows" + - " (" + calculateMbps((int)(opts.perClientRunRows * opts.sampleRate), totalElapsedTime, - getAverageValueLength(opts), opts.families, opts.columns) + ")"); + status.setStatus("Finished " + cmd + " in " + totalElapsedTime + "ms at offset " + opts.startRow + + " for " + opts.perClientRunRows + " rows" + " (" + + calculateMbps((int) (opts.perClientRunRows * opts.sampleRate), totalElapsedTime, + getAverageValueLength(opts), opts.families, opts.columns) + + ")"); return new RunResult(totalElapsedTime, t.numOfReplyOverLatencyThreshold, - t.numOfReplyFromReplica, t.getLatencyHistogram()); + t.numOfReplyFromReplica, t.getLatencyHistogram()); } private static int getAverageValueLength(final TestOptions opts) { - return opts.valueRandom? opts.valueSize/2: opts.valueSize; + return opts.valueRandom ? 
opts.valueSize / 2 : opts.valueSize; } - private void runTest(final Class cmd, TestOptions opts) throws IOException, - InterruptedException, ClassNotFoundException, ExecutionException { + private void runTest(final Class cmd, TestOptions opts) + throws IOException, InterruptedException, ClassNotFoundException, ExecutionException { // Log the configuration we're going to run with. Uses JSON mapper because lazy. It'll do // the TestOptions introspection for us and dump the output in a readable format. LOG.info(cmd.getSimpleName() + " test run options=" + GSON.toJson(opts)); @@ -2592,86 +2573,91 @@ protected static void printUsage(final String shortName, final String message) { System.err.println(" [-D]* "); System.err.println(); System.err.println("General Options:"); - System.err.println(" nomapred Run multiple clients using threads " + - "(rather than use mapreduce)"); - System.err.println(" oneCon all the threads share the same connection. Default: False"); + System.err.println( + " nomapred Run multiple clients using threads " + "(rather than use mapreduce)"); + System.err + .println(" oneCon all the threads share the same connection. Default: False"); System.err.println(" connCount connections all threads share. " + "For example, if set to 2, then all thread share 2 connection. " + "Default: depend on oneCon parameter. if oneCon set to true, then connCount=1, " + "if not, connCount=thread number"); - System.err.println(" sampleRate Execute test on a sample of total " + - "rows. Only supported by randomRead. Default: 1.0"); - System.err.println(" period Report every 'period' rows: " + - "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows()/10); + System.err.println(" sampleRate Execute test on a sample of total " + + "rows. Only supported by randomRead. Default: 1.0"); + System.err.println(" period Report every 'period' rows: " + + "Default: opts.perClientRunRows / 10 = " + DEFAULT_OPTS.getPerClientRunRows() / 10); System.err.println(" cycles How many times to cycle the test. Defaults: 1."); - System.err.println(" traceRate Enable HTrace spans. Initiate tracing every N rows. " + - "Default: 0"); + System.err.println( + " traceRate Enable HTrace spans. Initiate tracing every N rows. " + "Default: 0"); System.err.println(" latency Set to report operation latencies. Default: False"); - System.err.println(" latencyThreshold Set to report number of operations with latency " + - "over lantencyThreshold, unit in millisecond, default 0"); - System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + - " rows have been treated. Default: 0"); - System.err.println(" valueSize Pass value size to use: Default: " - + DEFAULT_OPTS.getValueSize()); - System.err.println(" valueRandom Set if we should vary value size between 0 and " + - "'valueSize'; set on read for stats on size: Default: Not set."); + System.err.println(" latencyThreshold Set to report number of operations with latency " + + "over lantencyThreshold, unit in millisecond, default 0"); + System.err.println(" measureAfter Start to measure the latency once 'measureAfter'" + + " rows have been treated. Default: 0"); + System.err.println( + " valueSize Pass value size to use: Default: " + DEFAULT_OPTS.getValueSize()); + System.err.println(" valueRandom Set if we should vary value size between 0 and " + + "'valueSize'; set on read for stats on size: Default: Not set."); System.err.println(" blockEncoding Block encoding to use. 
Value should be one of " + Arrays.toString(DataBlockEncoding.values()) + ". Default: NONE"); System.err.println(); System.err.println("Table Creation / Write Tests:"); System.err.println(" table Alternate table name. Default: 'TestTable'"); - System.err.println(" rows Rows each client runs. Default: " - + DEFAULT_OPTS.getPerClientRunRows() - + ". In case of randomReads and randomSeekScans this could" - + " be specified along with --size to specify the number of rows to be scanned within" - + " the total range specified by the size."); + System.err.println( + " rows Rows each client runs. Default: " + DEFAULT_OPTS.getPerClientRunRows() + + ". In case of randomReads and randomSeekScans this could" + + " be specified along with --size to specify the number of rows to be scanned within" + + " the total range specified by the size."); System.err.println( " size Total size in GiB. Mutually exclusive with --rows for writes and scans" + ". But for randomReads and randomSeekScans when you use size with --rows you could" + " use size to specify the end range and --rows" + " specifies the number of rows within that range. " + "Default: 1.0."); System.err.println(" compress Compression type to use (GZ, LZO, ...). Default: 'NONE'"); - System.err.println(" flushCommits Used to determine if the test should flush the table. " + - "Default: false"); - System.err.println(" valueZipf Set if we should vary value size between 0 and " + - "'valueSize' in zipf form: Default: Not set."); + System.err.println( + " flushCommits Used to determine if the test should flush the table. " + "Default: false"); + System.err.println(" valueZipf Set if we should vary value size between 0 and " + + "'valueSize' in zipf form: Default: Not set."); System.err.println(" writeToWAL Set writeToWAL on puts. Default: True"); System.err.println(" autoFlush Set autoFlush on htable. Default: False"); - System.err.println(" multiPut Batch puts together into groups of N. Only supported " + - "by write. If multiPut is bigger than 0, autoFlush need to set to true. Default: 0"); + System.err.println(" multiPut Batch puts together into groups of N. Only supported " + + "by write. If multiPut is bigger than 0, autoFlush need to set to true. Default: 0"); System.err.println(" presplit Create presplit table. If a table with same name exists," + " it'll be deleted and recreated (instead of verifying count of its existing regions). " + "Recommended for accurate perf analysis (see guide). Default: disabled"); - System.err.println(" usetags Writes tags along with KVs. Use with HFile V3. " + - "Default: false"); - System.err.println(" numoftags Specify the no of tags that would be needed. " + - "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); + System.err.println( + " usetags Writes tags along with KVs. Use with HFile V3. " + "Default: false"); + System.err.println(" numoftags Specify the no of tags that would be needed. " + + "This works only if usetags is true. Default: " + DEFAULT_OPTS.noOfTags); System.err.println(" splitPolicy Specify a custom RegionSplitPolicy for the table."); System.err.println(" columns Columns to write per row. Default: 1"); - System.err.println(" families Specify number of column families for the table. Default: 1"); + System.err + .println(" families Specify number of column families for the table. 
Default: 1"); System.err.println(); System.err.println("Read Tests:"); System.err.println(" filterAll Helps to filter out all the rows on the server side" + " there by not returning any thing back to the client. Helps to check the server side" + " performance. Uses FilterAllFilter internally. "); - System.err.println(" multiGet Batch gets together into groups of N. Only supported " + - "by randomRead. Default: disabled"); - System.err.println(" inmemory Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served " + - "from memory. Default: false"); - System.err.println(" bloomFilter Bloom filter type, one of " - + Arrays.toString(BloomType.values())); + System.err.println(" multiGet Batch gets together into groups of N. Only supported " + + "by randomRead. Default: disabled"); + System.err.println(" inmemory Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served " + + "from memory. Default: false"); + System.err.println( + " bloomFilter Bloom filter type, one of " + Arrays.toString(BloomType.values())); System.err.println(" blockSize Blocksize to use when writing out hfiles. "); - System.err.println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " - + "Uses the CompactingMemstore"); + System.err + .println(" inmemoryCompaction Makes the column family to do inmemory flushes/compactions. " + + "Uses the CompactingMemstore"); System.err.println(" addColumns Adds columns to scans/gets explicitly. Default: true"); System.err.println(" replicas Enable region replica testing. Defaults: 1."); - System.err.println(" randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); + System.err.println( + " randomSleep Do a random sleep before each get between 0 and entered value. Defaults: 0"); System.err.println(" caching Scan caching to use. Default: 30"); System.err.println(" asyncPrefetch Enable asyncPrefetch for scan"); System.err.println(" cacheBlocks Set the cacheBlocks option for scan. Default: true"); - System.err.println(" scanReadType Set the readType option for scan, stream/pread/default. Default: default"); + System.err.println( + " scanReadType Set the readType option for scan, stream/pread/default. Default: default"); System.err.println(" bufferSize Set the value of client side buffering. Default: 2MB"); System.err.println(); System.err.println(" Note: -D properties will be applied to the conf used. "); @@ -2695,10 +2681,10 @@ protected static void printUsage(final String shortName, final String message) { } /** - * Parse options passed in via an arguments array. Assumes that array has been split - * on white-space and placed into a {@code Queue}. Any unknown arguments will remain - * in the queue at the conclusion of this method call. It's up to the caller to deal - * with these unrecognized arguments. + * Parse options passed in via an arguments array. Assumes that array has been split on + * white-space and placed into a {@code Queue}. Any unknown arguments will remain in the queue at + * the conclusion of this method call. It's up to the caller to deal with these unrecognized + * arguments. 
*/ static TestOptions parseOpts(Queue args) { TestOptions opts = new TestOptions(); @@ -2887,7 +2873,7 @@ static TestOptions parseOpts(Queue args) { } final String blockSize = "--blockSize="; - if(cmd.startsWith(blockSize) ) { + if (cmd.startsWith(blockSize)) { opts.blockSize = Integer.parseInt(cmd.substring(blockSize.length())); continue; } @@ -2996,17 +2982,17 @@ static TestOptions parseOpts(Queue args) { } /** - * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts - */ - private static void validateParsedOpts(TestOptions opts) { + * Validates opts after all the opts are parsed, so that caller need not to maintain order of opts + */ + private static void validateParsedOpts(TestOptions opts) { if (!opts.autoFlush && opts.multiPut > 0) { throw new IllegalArgumentException("autoFlush must be true when multiPut is more than 0"); } if (opts.oneCon && opts.connCount > 1) { - throw new IllegalArgumentException("oneCon is set to true, " - + "connCount should not bigger than 1"); + throw new IllegalArgumentException( + "oneCon is set to true, " + "connCount should not bigger than 1"); } if (opts.valueZipf && opts.valueRandom) { @@ -3033,8 +3019,8 @@ static TestOptions calculateRowsAndSize(final TestOptions opts) { } static int getRowsPerGB(final TestOptions opts) { - return ONE_GB / ((opts.valueRandom? opts.valueSize/2: opts.valueSize) * opts.getFamilies() * - opts.getColumns()); + return ONE_GB / ((opts.valueRandom ? opts.valueSize / 2 : opts.valueSize) * opts.getFamilies() + * opts.getColumns()); } @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java index ca2813012a94..d51ec058c8db 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/ScanPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase; import java.io.IOException; @@ -49,8 +48,7 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; /** - * A simple performance evaluation tool for single client and MR scans - * and snapshot scans. + * A simple performance evaluation tool for single client and MR scans and snapshot scans. */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class ScanPerformanceEvaluation extends AbstractHBaseTool { @@ -78,7 +76,8 @@ public void setConf(Configuration conf) { @Override protected void addOptions() { - this.addRequiredOptWithArg("t", "type", "the type of the test. One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); + this.addRequiredOptWithArg("t", "type", + "the type of the test. 
One of the following: streaming|scan|snapshotscan|scanmapreduce|snapshotscanmapreduce"); this.addOptWithArg("f", "file", "the filename to read from"); this.addOptWithArg("tn", "table", "the tablename to read from"); this.addOptWithArg("sn", "snapshot", "the snapshot name to read from"); @@ -119,15 +118,15 @@ protected void testHdfsStreaming(Path filename) throws IOException { } streamTimer.stop(); - double throughput = (double)totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / streamTimer.elapsed(TimeUnit.SECONDS); System.out.println("HDFS streaming: "); - System.out.println("total time to open: " + - fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out + .println("total time to open: " + fileOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to read: " + streamTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throghput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throghput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); } private Scan getScan() { @@ -176,30 +175,30 @@ public void testScan() throws IOException { ScanMetrics metrics = scan.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan: "); - System.out.println("total time to open table: " + - tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open table: " + tableOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + 
StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScan() throws IOException { Stopwatch snapshotRestoreTimer = Stopwatch.createUnstarted(); Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); @@ -233,40 +232,39 @@ public void testSnapshotScan() throws IOException { ScanMetrics metrics = scanner.getScanMetrics(); long totalBytes = metrics.countOfBytesInResults.get(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan snapshot: "); - System.out.println("total time to restore snapshot: " + - snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total time to scan: " + - scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to restore snapshot: " + + snapshotRestoreTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("Scan metrics:\n" + metrics.getMetricsMap()); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } public static enum ScanCounter { - NUM_ROWS, - NUM_CELLS, + NUM_ROWS, NUM_CELLS, } public static class MyMapper extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, - InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { context.getCounter(ScanCounter.NUM_ROWS).increment(1); context.getCounter(ScanCounter.NUM_CELLS).increment(value.rawCells().length); } @@ -285,14 +283,8 @@ public void testScanMapReduce() throws IOException, InterruptedException, ClassN job.setJarByClass(getClass()); - TableMapReduceUtil.initTableMapperJob( - this.tablename, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job - ); + 
TableMapReduceUtil.initTableMapperJob(this.tablename, scan, MyMapper.class, NullWritable.class, + NullWritable.class, job); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -308,25 +300,28 @@ public void testScanMapReduce() throws IOException, InterruptedException, ClassN long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } - public void testSnapshotScanMapReduce() throws IOException, InterruptedException, ClassNotFoundException { + public void testSnapshotScanMapReduce() + throws IOException, InterruptedException, ClassNotFoundException { Stopwatch scanOpenTimer = Stopwatch.createUnstarted(); Stopwatch scanTimer = Stopwatch.createUnstarted(); @@ -339,16 +334,8 @@ public void testSnapshotScanMapReduce() throws IOException, InterruptedException job.setJarByClass(getClass()); - TableMapReduceUtil.initTableSnapshotMapperJob( - this.snapshotName, - scan, - MyMapper.class, - NullWritable.class, - NullWritable.class, - job, - true, - new Path(restoreDir) - ); + TableMapReduceUtil.initTableSnapshotMapperJob(this.snapshotName, scan, MyMapper.class, + NullWritable.class, NullWritable.class, job, true, new Path(restoreDir)); job.setNumReduceTasks(0); job.setOutputKeyClass(NullWritable.class); @@ -364,29 +351,31 @@ public void testSnapshotScanMapReduce() throws IOException, InterruptedException long numCells = counters.findCounter(ScanCounter.NUM_CELLS).getValue(); long totalBytes = counters.findCounter(HBASE_COUNTER_GROUP_NAME, "BYTES_IN_RESULTS").getValue(); - double throughput = (double)totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); - double throughputRows = (double)numRows / 
scanTimer.elapsed(TimeUnit.SECONDS); - double throughputCells = (double)numCells / scanTimer.elapsed(TimeUnit.SECONDS); + double throughput = (double) totalBytes / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputRows = (double) numRows / scanTimer.elapsed(TimeUnit.SECONDS); + double throughputCells = (double) numCells / scanTimer.elapsed(TimeUnit.SECONDS); System.out.println("HBase scan mapreduce: "); - System.out.println("total time to open scanner: " + - scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); + System.out.println( + "total time to open scanner: " + scanOpenTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); System.out.println("total time to scan: " + scanTimer.elapsed(TimeUnit.MILLISECONDS) + " ms"); - System.out.println("total bytes: " + totalBytes + " bytes (" - + StringUtils.humanReadableInt(totalBytes) + ")"); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughput) + "B/s"); + System.out.println( + "total bytes: " + totalBytes + " bytes (" + StringUtils.humanReadableInt(totalBytes) + ")"); + System.out.println("throughput : " + StringUtils.humanReadableInt((long) throughput) + "B/s"); System.out.println("total rows : " + numRows); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputRows) + " rows/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputRows) + " rows/s"); System.out.println("total cells : " + numCells); - System.out.println("throughput : " + StringUtils.humanReadableInt((long)throughputCells) + " cells/s"); + System.out.println( + "throughput : " + StringUtils.humanReadableInt((long) throughputCells) + " cells/s"); } @Override protected int doWork() throws Exception { if (type.equals("streaming")) { testHdfsStreaming(new Path(file)); - } else if (type.equals("scan")){ + } else if (type.equals("scan")) { testScan(); } else if (type.equals("snapshotscan")) { testSnapshotScan(); @@ -398,7 +387,7 @@ protected int doWork() throws Exception { return 0; } - public static void main (String[] args) throws Exception { + public static void main(String[] args) throws Exception { int ret = ToolRunner.run(HBaseConfiguration.create(), new ScanPerformanceEvaluation(), args); System.exit(ret); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java index 475960dde35c..e9bef3ea003f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/TestPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,6 @@ import java.util.Queue; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -54,7 +53,7 @@ import org.apache.hbase.thirdparty.com.google.gson.Gson; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestPerformanceEvaluation { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -64,14 +63,13 @@ public class TestPerformanceEvaluation { @Test public void testDefaultInMemoryCompaction() { - PerformanceEvaluation.TestOptions defaultOpts = - new PerformanceEvaluation.TestOptions(); + PerformanceEvaluation.TestOptions defaultOpts = new PerformanceEvaluation.TestOptions(); assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, - defaultOpts.getInMemoryCompaction().toString()); + defaultOpts.getInMemoryCompaction().toString()); HTableDescriptor htd = PerformanceEvaluation.getTableDescriptor(defaultOpts); - for (HColumnDescriptor hcd: htd.getFamilies()) { + for (HColumnDescriptor hcd : htd.getFamilies()) { assertEquals(CompactingMemStore.COMPACTING_MEMSTORE_TYPE_DEFAULT, - hcd.getInMemoryCompaction().toString()); + hcd.getInMemoryCompaction().toString()); } } @@ -83,7 +81,7 @@ public void testSerialization() { Gson gson = GsonUtil.createGson().create(); String optionsString = gson.toJson(options); PerformanceEvaluation.TestOptions optionsDeserialized = - gson.fromJson(optionsString, PerformanceEvaluation.TestOptions.class); + gson.fromJson(optionsString, PerformanceEvaluation.TestOptions.class); assertTrue(optionsDeserialized.isAutoFlush()); } @@ -97,7 +95,7 @@ public void testWriteInputFile() throws IOException { opts.setNumClientThreads(clients); opts.setPerClientRunRows(10); Path dir = - PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir()); + PerformanceEvaluation.writeInputFile(HTU.getConfiguration(), opts, HTU.getDataTestDir()); FileSystem fs = FileSystem.get(HTU.getConfiguration()); Path p = new Path(dir, PerformanceEvaluation.JOB_INPUT_FILENAME); long len = fs.getFileStatus(p).getLen(); @@ -106,7 +104,7 @@ public void testWriteInputFile() throws IOException { try (FSDataInputStream dis = fs.open(p)) { dis.readFully(content); BufferedReader br = new BufferedReader( - new InputStreamReader(new ByteArrayInputStream(content), StandardCharsets.UTF_8)); + new InputStreamReader(new ByteArrayInputStream(content), StandardCharsets.UTF_8)); int count = 0; while (br.readLine() != null) { count++; @@ -177,9 +175,9 @@ public void testZipfian() throws NoSuchMethodException, SecurityException, Insta opts.setValueSize(valueSize); RandomReadTest rrt = new RandomReadTest(null, opts, null); Constructor ctor = - Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class); + Histogram.class.getDeclaredConstructor(com.codahale.metrics.Reservoir.class); ctor.setAccessible(true); - Histogram histogram = (Histogram)ctor.newInstance(new UniformReservoir(1024 * 500)); + Histogram histogram = (Histogram) ctor.newInstance(new UniformReservoir(1024 * 500)); for (int i = 0; i < 100; i++) { histogram.update(rrt.getValueLength(null)); } @@ -258,7 +256,7 @@ public void testParseOptsMultiPuts() { System.out.println(e.getMessage()); } - //Re-create options + // Re-create options opts = new LinkedList<>(); opts.offer("--autoFlush=true"); 
opts.offer("--multiPut=10"); @@ -342,7 +340,7 @@ public void testParseOptsValueRandom() { try { options = PerformanceEvaluation.parseOpts(opts); fail("should fail"); - } catch (IllegalStateException e) { + } catch (IllegalStateException e) { System.out.println(e.getMessage()); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java index 327b7afec2fb..6c49a43bf463 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestDriver.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,18 +29,17 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestDriver { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestDriver.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestDriver.class); @Test public void testDriverMainMethod() throws Throwable { ProgramDriver programDriverMock = mock(ProgramDriver.class); Driver.setProgramDriver(programDriverMock); - Driver.main(new String[]{}); + Driver.main(new String[] {}); verify(programDriverMock).driver(Mockito.any()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java index 12db348ba8b8..092c53b4c20e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestGroupingTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableList; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMap { @ClassRule @@ -58,8 +58,7 @@ public class TestGroupingTableMap { @Test @SuppressWarnings({ "deprecation", "unchecked" }) - public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() - throws Exception { + public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception { GroupingTableMap gTableMap = null; try { Result result = mock(Result.class); @@ -71,10 +70,10 @@ public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("2222")), - new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("3333"))); + List keyValues = ImmutableList. 
of( + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("2222")), + new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = mock(OutputCollector.class); @@ -82,8 +81,7 @@ public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() verify(result).listCells(); verifyZeroInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -101,21 +99,19 @@ public void shouldCreateNewKeyAlthoughExtraKey() throws Exception { gTableMap.configure(jobConf); byte[] row = {}; - List keyValues = ImmutableList.of( - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), - new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("2222")), - new KeyValue(row, "familyC".getBytes(), "qualifierC".getBytes(), Bytes.toBytes("3333"))); + List keyValues = ImmutableList. of( + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), Bytes.toBytes("1111")), + new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), Bytes.toBytes("2222")), + new KeyValue(row, "familyC".getBytes(), "qualifierC".getBytes(), Bytes.toBytes("3333"))); when(result.listCells()).thenReturn(keyValues); OutputCollector outputCollectorMock = mock(OutputCollector.class); gTableMap.map(null, result, outputCollectorMock, reporter); verify(result).listCells(); - verify(outputCollectorMock, times(1)) - .collect(any(), any()); + verify(outputCollectorMock, times(1)).collect(any(), any()); verifyNoMoreInteractions(outputCollectorMock); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -136,22 +132,22 @@ public void shouldCreateNewKey() throws Exception { final byte[] firstPartKeyValue = Bytes.toBytes("34879512738945"); final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437"); byte[] row = {}; - List cells = ImmutableList.of( - new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue), - new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue)); + List cells = ImmutableList. of( + new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue), + new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue)); when(result.listCells()).thenReturn(cells); final AtomicBoolean outputCollected = new AtomicBoolean(); OutputCollector outputCollector = new OutputCollector() { - @Override - public void collect(ImmutableBytesWritable arg, Result result) throws IOException { - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. 
- Bytes.concat(firstPartKeyValue, bSeparator, - secondPartKeyValue), arg.copyBytes()); - outputCollected.set(true); - } - }; + @Override + public void collect(ImmutableBytesWritable arg, Result result) throws IOException { + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartKeyValue, bSeparator, secondPartKeyValue), + arg.copyBytes()); + outputCollected.set(true); + } + }; gTableMap.map(null, result, outputCollector, reporter); verify(result).listCells(); @@ -161,12 +157,11 @@ public void collect(ImmutableBytesWritable arg, Result result) throws IOExceptio final byte[] secondPartValue = Bytes.toBytes("4678456942345"); byte[][] data = { firstPartValue, secondPartValue }; ImmutableBytesWritable byteWritable = gTableMap.createGroupKey(data); - assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives. - Bytes.concat(firstPartValue, - bSeparator, secondPartValue), byteWritable.get()); + assertArrayEquals(org.apache.hbase.thirdparty.com.google.common.primitives.Bytes + .concat(firstPartValue, bSeparator, secondPartValue), + byteWritable.get()); } finally { - if (gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } @@ -178,8 +173,7 @@ public void shouldReturnNullFromCreateGroupKey() throws Exception { gTableMap = new GroupingTableMap(); assertNull(gTableMap.createGroupKey(null)); } finally { - if(gTableMap != null) - gTableMap.close(); + if (gTableMap != null) gTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java index 25576c1ef420..f90a45701eb0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestIdentityTableMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestIdentityTableMap { @ClassRule @@ -55,14 +55,11 @@ public void shouldCollectPredefinedTimes() throws IOException { mock(OutputCollector.class); for (int i = 0; i < recordNumber; i++) - identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, - reporterMock); + identityTableMap.map(bytesWritableMock, resultMock, outputCollectorMock, reporterMock); - verify(outputCollectorMock, times(recordNumber)).collect( - Mockito.any(), Mockito.any()); + verify(outputCollectorMock, times(recordNumber)).collect(Mockito.any(), Mockito.any()); } finally { - if (identityTableMap != null) - identityTableMap.close(); + if (identityTableMap != null) identityTableMap.close(); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java index 1dd3e69f9775..64cd2f035933 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ protected void runJob(String jobName, Configuration c, List scans) job.setReducerClass(Reducer.class); TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), Mapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); TableMapReduceUtil.addDependencyJars(job); @@ -92,10 +92,8 @@ public void map(ImmutableBytesWritable key, Result value, } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. * @throws IOException if an I/O error occurs */ @Override @@ -109,8 +107,7 @@ public void configure(JobConf jobConf) { } public static class Reducer extends TestMultiTableSnapshotInputFormat.ScanReducer implements - org.apache.hadoop.mapred.Reducer { + org.apache.hadoop.mapred.Reducer { private JobConf jobConf; @@ -122,10 +119,8 @@ public void reduce(ImmutableBytesWritable key, Iterator } /** - * Closes this stream and releases any system resources associated - * with it. If the stream is already closed then invoking this - * method has no effect. - * + * Closes this stream and releases any system resources associated with it. If the stream is + * already closed then invoking this method has no effect. 
* @throws IOException if an I/O error occurs */ @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java index 13913e5fc24a..828008645527 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.apache.hbase.thirdparty.com.google.common.base.Joiner; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestRowCounter { @ClassRule @@ -68,8 +68,7 @@ void doRead() { @Test @SuppressWarnings("deprecation") - public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() - throws Exception { + public void shouldExitAndPrintUsageSinceParameterNumberLessThanThree() throws Exception { final String[] args = new String[] { "one", "two" }; String line = "ERROR: Wrong number of parameters: " + args.length; String result = new OutputReader(System.err) { @@ -90,10 +89,9 @@ public void shouldRegInReportEveryIncomingRow() throws IOException { Reporter reporter = mock(Reporter.class); for (int i = 0; i < iterationNumber; i++) mapper.map(mock(ImmutableBytesWritable.class), mock(Result.class), - mock(OutputCollector.class), reporter); + mock(OutputCollector.class), reporter); - Mockito.verify(reporter, times(iterationNumber)).incrCounter( - any(), anyLong()); + Mockito.verify(reporter, times(iterationNumber)).incrCounter(any(), anyLong()); } @Test @@ -101,8 +99,7 @@ public void shouldRegInReportEveryIncomingRow() throws IOException { public void shouldCreateAndRunSubmittableJob() throws Exception { RowCounter rCounter = new RowCounter(); rCounter.setConf(HBaseConfiguration.create()); - String[] args = new String[] { "\temp", "tableA", "column1", "column2", - "column3" }; + String[] args = new String[] { "\temp", "tableA", "column1", "column2", "column3" }; JobConf jobConfig = rCounter.createSubmittableJob(args); assertNotNull(jobConfig); @@ -110,8 +107,8 @@ public void shouldCreateAndRunSubmittableJob() throws Exception { assertEquals("rowcounter", jobConfig.getJobName()); assertEquals(jobConfig.getMapOutputValueClass(), Result.class); assertEquals(jobConfig.getMapperClass(), RowCounterMapper.class); - assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), Joiner.on(' ') - .join("column1", "column2", "column3")); + assertEquals(jobConfig.get(TableInputFormat.COLUMN_LIST), + Joiner.on(' ').join("column1", "column2", "column3")); assertEquals(jobConfig.getMapOutputKeyClass(), ImmutableBytesWritable.class); } @@ -147,17 +144,16 @@ protected String read() throws Exception { return new String(outBytes.toByteArray()); } finally { switch (outs) { - case OUT: { - System.setOut(oldPrintStream); - break; - } - case ERR: { - System.setErr(oldPrintStream); - break; - } - default: - throw new IllegalStateException( - "OutputReader: unsupported PrintStream"); + case OUT: { + System.setOut(oldPrintStream); + break; + } + case ERR: { + System.setErr(oldPrintStream); + break; + } + default: + throw new IllegalStateException("OutputReader: unsupported PrintStream"); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java index 7b097d264cef..36d6470238f8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestSplitTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -33,7 +33,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSplitTable { @ClassRule @@ -44,16 +44,16 @@ public class TestSplitTable { public TestName name = new TestName(); @Test - @SuppressWarnings({"deprecation", "SelfComparison"}) + @SuppressWarnings({ "deprecation", "SelfComparison" }) public void testSplitTableCompareTo() { - TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("aaa"), Bytes.toBytes("ddd"), "locationA"); + TableSplit aTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("aaa"), + Bytes.toBytes("ddd"), "locationA"); - TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("iii"), Bytes.toBytes("kkk"), "locationA"); + TableSplit bTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("iii"), + Bytes.toBytes("kkk"), "locationA"); - TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), - Bytes.toBytes("lll"), Bytes.toBytes("zzz"), "locationA"); + TableSplit cTableSplit = new TableSplit(Bytes.toBytes("tableA"), Bytes.toBytes("lll"), + Bytes.toBytes("zzz"), "locationA"); assertEquals(0, aTableSplit.compareTo(aTableSplit)); assertEquals(0, bTableSplit.compareTo(bTableSplit)); @@ -105,18 +105,15 @@ public void testSplitTableEquals() { @Test @SuppressWarnings("deprecation") public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), "row-start".getBytes(), "row-end".getBytes(), - "location"); - String str = - "HBase table split(table name: " + name.getMethodName() + ", start row: row-start, " - + "end row: row-end, region location: location)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + "row-start".getBytes(), "row-end".getBytes(), "location"); + String str = "HBase table split(table name: " + name.getMethodName() + + ", start row: row-start, " + "end row: row-end, region location: location)"; Assert.assertEquals(str, split.toString()); split = new TableSplit((TableName) null, null, null, null); - str = - "HBase table split(table name: null, start row: null, " - + "end row: null, region location: null)"; + str = "HBase table split(table name: null, start row: null, " + + "end row: null, region location: null)"; Assert.assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java index 5ad1adef6a28..094b9249c5f2 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -75,7 +75,7 @@ /** * This tests the TableInputFormat and its recovery semantics */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableInputFormat { @ClassRule @@ -109,7 +109,6 @@ public void before() throws IOException { /** * Setup a table with two rows and values. - * * @param tableName the name of the table to create * @return A Table instance for the created table. * @throws IOException @@ -120,7 +119,6 @@ public static Table createTable(byte[] tableName) throws IOException { /** * Setup a table with two rows and values per column family. - * * @param tableName * @return A Table instance for the created table. * @throws IOException @@ -142,15 +140,14 @@ public static Table createTable(byte[] tableName, byte[][] families) throws IOEx /** * Verify that the result and key have expected values. - * * @param r single row result * @param key the row key * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -159,9 +156,7 @@ static boolean checkResult(Result r, ImmutableBytesWritable key, } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapred API. - * + * Create table data and run tests on specified htable using the o.a.h.hbase.mapred API. * @param table * @throws IOException */ @@ -192,11 +187,9 @@ static void runTestMapred(Table table) throws IOException { /** * Create a table that IOE's on first scanner next call - * * @throws IOException */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -225,13 +218,10 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Create a table that throws a DoNoRetryIOException on first scanner next - * call - * + * Create a table that throws a DoNoRetryIOException on first scanner next call * @throws IOException */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -246,8 +236,7 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) .when(scanner).next(); return scanner; } @@ -264,7 +253,6 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { /** * Run test assuming no errors using mapred api. 
- * * @throws IOException */ @Test @@ -275,7 +263,6 @@ public void testTableRecordReader() throws IOException { /** * Run test assuming Scanner IOException failure using mapred api, - * * @throws IOException */ @Test @@ -286,7 +273,6 @@ public void testTableRecordReaderScannerFail() throws IOException { /** * Run test assuming Scanner IOException failure using mapred api, - * * @throws IOException */ @Test(expected = IOException.class) @@ -297,7 +283,6 @@ public void testTableRecordReaderScannerFailTwice() throws IOException { /** * Run test assuming NotServingRegionException using mapred api. - * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test @@ -308,7 +293,6 @@ public void testTableRecordReaderScannerTimeout() throws IOException { /** * Run test assuming NotServingRegionException using mapred api. - * * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @Test(expected = org.apache.hadoop.hbase.NotServingRegionException.class) @@ -330,8 +314,8 @@ public void testExtensionOfTableInputFormatBase() throws IOException { @Test public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "as it was given in 0.98."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "as it was given in 0.98."); final Table table = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); @@ -339,8 +323,8 @@ public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException { @Test public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " - + "using JobConfigurable."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "using JobConfigurable."); final Table table = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -378,17 +362,19 @@ public void configure(JobConf conf) { @Override public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) throws IOException { + OutputCollector output, Reporter reporter) throws IOException { for (Cell cell : value.listCells()) { - reporter.getCounter(TestTableInputFormat.class.getName() + ":row", - Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + reporter + .getCounter(TestTableInputFormat.class.getName() + ":row", + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":family", - Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) + reporter + .getCounter(TestTableInputFormat.class.getName() + ":family", + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) .increment(1l); - reporter.getCounter(TestTableInputFormat.class.getName() + ":value", - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) + reporter + .getCounter(TestTableInputFormat.class.getName() + ":value", + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) .increment(1l); } } @@ -408,12 +394,11 @@ public void configure(JobConf job) { Table exampleTable = 
connection.getTable(TableName.valueOf("exampleDeprecatedTable")); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); // optional setRowFilter(exampleFilter); } catch (IOException exception) { @@ -440,7 +425,6 @@ protected void initialize(JobConf job) throws IOException { } } - public static class ExampleTIF extends TableInputFormatBase { @Override @@ -453,12 +437,11 @@ protected void initialize(JobConf job, String table) throws IOException { TableName tableName = TableName.valueOf(table); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // mandatory setInputColumns(inputColumns); Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); // optional setRowFilter(exampleFilter); } @@ -466,4 +449,3 @@ protected void initialize(JobConf job, String table) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java index e36847613062..cff3a831facd 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -43,11 +43,11 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. 
*/ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) @SuppressWarnings("deprecation") public class TestTableMapReduce extends TestTableMapReduceBase { @@ -55,24 +55,23 @@ public class TestTableMapReduce extends TestTableMapReduceBase { public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableMapReduce.class); - private static final Logger LOG = - LoggerFactory.getLogger(TestTableMapReduce.class.getName()); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class.getName()); - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce */ - static class ProcessContentsMapper extends MapReduceBase implements - TableMap { + static class ProcessContentsMapper extends MapReduceBase + implements TableMap { /** * Pass the key, and reversed value to reduce */ public void map(ImmutableBytesWritable key, Result value, - OutputCollector output, - Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { output.collect(key, TestTableMapReduceBase.map(key, value)); } } @@ -86,8 +85,8 @@ protected void runTestOnTable(Table table) throws IOException { jobConf.setJobName("process column contents"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(table.getName().getNameAsString(), - Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, - ImmutableBytesWritable.class, Put.class, jobConf); + Bytes.toString(INPUT_FAMILY), ProcessContentsMapper.class, ImmutableBytesWritable.class, + Put.class, jobConf); TableMapReduceUtil.initTableReduceJob(table.getName().getNameAsString(), IdentityTableReduce.class, jobConf); @@ -105,4 +104,3 @@ protected void runTestOnTable(Table table) throws IOException { } } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java index fe1600626695..fa765ab8f016 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -57,15 +57,14 @@ import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableSet; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTableMapReduceUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestTableMapReduceUtil.class); - private static final Logger LOG = LoggerFactory - .getLogger(TestTableMapReduceUtil.class); + private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduceUtil.class); private static Table presidentsTable; private static final String TABLE_NAME = "People"; @@ -73,20 +72,19 @@ public class TestTableMapReduceUtil { private static final byte[] COLUMN_FAMILY = Bytes.toBytes("info"); private static final byte[] COLUMN_QUALIFIER = Bytes.toBytes("name"); - private static ImmutableSet presidentsRowKeys = ImmutableSet.of( - "president1", "president2", "president3"); - private static Iterator presidentNames = ImmutableSet.of( - "John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); + private static ImmutableSet presidentsRowKeys = + ImmutableSet.of("president1", "president2", "president3"); + private static Iterator presidentNames = + ImmutableSet.of("John F. Kennedy", "George W. Bush", "Barack Obama").iterator(); - private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", - "actor2"); - private static Iterator actorNames = ImmutableSet.of( - "Jack Nicholson", "Martin Freeman").iterator(); + private static ImmutableSet actorsRowKeys = ImmutableSet.of("actor1", "actor2"); + private static Iterator actorNames = + ImmutableSet.of("Jack Nicholson", "Martin Freeman").iterator(); private static String PRESIDENT_PATTERN = "president"; private static String ACTOR_PATTERN = "actor"; - private static ImmutableMap> relation = ImmutableMap - .of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); + private static ImmutableMap> relation = + ImmutableMap.of(PRESIDENT_PATTERN, presidentsRowKeys, ACTOR_PATTERN, actorsRowKeys); private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); @@ -133,12 +131,11 @@ private static void createPutCommand(Table table) throws IOException { } /** - * Check what the given number of reduce tasks for the given job configuration - * does not exceed the number of regions for the given table. + * Check what the given number of reduce tasks for the given job configuration does not exceed the + * number of regions for the given table. 
*/ @Test - public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Assert.assertNotNull(presidentsTable); Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); @@ -155,8 +152,7 @@ public void shouldNumberOfReduceTaskNotExceedNumberOfRegionsForGivenTable() } @Test - public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() - throws IOException { + public void shouldNumberOfMapTaskNotExceedNumberOfRegionsForGivenTable() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); TableMapReduceUtil.setNumReduceTasks(TABLE_NAME, jobConf); @@ -178,49 +174,42 @@ public void shoudBeValidMapReduceEvaluation() throws Exception { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(1); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @Test @SuppressWarnings("deprecation") - public void shoudBeValidMapReduceWithPartitionerEvaluation() - throws IOException { + public void shoudBeValidMapReduceWithPartitionerEvaluation() throws IOException { Configuration cfg = UTIL.getConfiguration(); JobConf jobConf = new JobConf(cfg); try { jobConf.setJobName("process row task"); jobConf.setNumReduceTasks(2); TableMapReduceUtil.initTableMapJob(TABLE_NAME, new String(COLUMN_FAMILY), - ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, - jobConf); + ClassificatorMapper.class, ImmutableBytesWritable.class, Put.class, jobConf); - TableMapReduceUtil.initTableReduceJob(TABLE_NAME, - ClassificatorRowReduce.class, jobConf, HRegionPartitioner.class); + TableMapReduceUtil.initTableReduceJob(TABLE_NAME, ClassificatorRowReduce.class, jobConf, + HRegionPartitioner.class); RunningJob job = JobClient.runJob(jobConf); assertTrue(job.isSuccessful()); } finally { - if (jobConf != null) - FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); + if (jobConf != null) FileUtil.fullyDelete(new File(jobConf.get("hadoop.tmp.dir"))); } } @SuppressWarnings("deprecation") - static class ClassificatorRowReduce extends MapReduceBase implements - TableReduce { + static class ClassificatorRowReduce extends MapReduceBase + implements TableReduce { @Override public void reduce(ImmutableBytesWritable key, Iterator values, - OutputCollector output, Reporter reporter) - throws IOException { + OutputCollector output, Reporter reporter) throws IOException { String strKey = Bytes.toString(key.get()); List result = new ArrayList<>(); while (values.hasNext()) @@ -244,18 +233,17 @@ private void throwAccertionError(String errorMessage) throws AssertionError { } @SuppressWarnings("deprecation") - static class ClassificatorMapper extends MapReduceBase implements - TableMap { + static class ClassificatorMapper extends MapReduceBase + implements TableMap { @Override public void 
map(ImmutableBytesWritable row, Result result, - OutputCollector outCollector, - Reporter reporter) throws IOException { + OutputCollector outCollector, Reporter reporter) + throws IOException { String rowKey = Bytes.toString(result.getRow()); - final ImmutableBytesWritable pKey = new ImmutableBytesWritable( - Bytes.toBytes(PRESIDENT_PATTERN)); - final ImmutableBytesWritable aKey = new ImmutableBytesWritable( - Bytes.toBytes(ACTOR_PATTERN)); + final ImmutableBytesWritable pKey = + new ImmutableBytesWritable(Bytes.toBytes(PRESIDENT_PATTERN)); + final ImmutableBytesWritable aKey = new ImmutableBytesWritable(Bytes.toBytes(ACTOR_PATTERN)); ImmutableBytesWritable outKey = null; if (rowKey.startsWith(PRESIDENT_PATTERN)) { @@ -266,11 +254,9 @@ public void map(ImmutableBytesWritable row, Result result, throw new AssertionError("unexpected rowKey"); } - String name = Bytes.toString(result.getValue(COLUMN_FAMILY, - COLUMN_QUALIFIER)); - outCollector.collect(outKey, - new Put(Bytes.toBytes("rowKey2")) - .addColumn(COLUMN_FAMILY, COLUMN_QUALIFIER, Bytes.toBytes(name))); + String name = Bytes.toString(result.getValue(COLUMN_FAMILY, COLUMN_QUALIFIER)); + outCollector.collect(outKey, new Put(Bytes.toBytes("rowKey2")).addColumn(COLUMN_FAMILY, + COLUMN_QUALIFIER, Bytes.toBytes(name))); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java index 746ac532ac97..005c1d955983 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableOutputFormatConnectionExhaust.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,10 +37,9 @@ import org.slf4j.LoggerFactory; /** - * Spark creates many instances of TableOutputFormat within a single process. We need to make - * sure we can have many instances and not leak connections. - * - * This test creates a few TableOutputFormats and shouldn't fail due to ZK connection exhaustion. + * Spark creates many instances of TableOutputFormat within a single process. We need to make sure + * we can have many instances and not leak connections. This test creates a few TableOutputFormats + * and shouldn't fail due to ZK connection exhaustion. */ @Category(MediumTests.class) public class TestTableOutputFormatConnectionExhaust { @@ -77,16 +76,16 @@ public void before() throws IOException { } /** - * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase + * Open and close a TableOutputFormat. The closing the RecordWriter should release HBase * Connection (ZK) resources, and will throw exception if they are exhausted. 
*/ - static void openCloseTableOutputFormat(int iter) throws IOException { + static void openCloseTableOutputFormat(int iter) throws IOException { LOG.info("Instantiating TableOutputFormat connection " + iter); JobConf conf = new JobConf(); conf.addResource(UTIL.getConfiguration()); conf.set(TableOutputFormat.OUTPUT_TABLE, TABLE); - TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, conf); + TableMapReduceUtil.initTableMapJob(TABLE, FAMILY, TableMap.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, conf); TableOutputFormat tof = new TableOutputFormat(); RecordWriter rw = tof.getRecordWriter(null, conf, TABLE, null); rw.close(null); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java index 9e05a85c4806..dbdaf7bee554 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapred/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule @@ -63,7 +63,7 @@ public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBa private static final byte[] aaa = Bytes.toBytes("aaa"); private static final byte[] after_zzz = Bytes.toBytes("zz{"); // 'z' + 1 => '{' private static final String COLUMNS = - Bytes.toString(FAMILIES[0]) + " " + Bytes.toString(FAMILIES[1]); + Bytes.toString(FAMILIES[0]) + " " + Bytes.toString(FAMILIES[1]); @Rule public TestName name = new TestName(); @@ -92,7 +92,7 @@ public void map(ImmutableBytesWritable key, Result value, public static class TestTableSnapshotReducer extends MapReduceBase implements Reducer { HBaseTestingUtility.SeenRowTracker rowTracker = - new HBaseTestingUtility.SeenRowTracker(aaa, after_zzz); + new HBaseTestingUtility.SeenRowTracker(aaa, after_zzz); @Override public void reduce(ImmutableBytesWritable key, Iterator values, @@ -117,19 +117,17 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. 
- Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -142,10 +140,9 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { @Test @Override public void testWithMockedMapReduceMultiRegion() throws Exception { - testWithMockedMapReduce( - UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); - // It does not matter whether true or false is given to setLocalityEnabledTo, - // because it is not read in testWithMockedMapReduce(). + testWithMockedMapReduce(UTIL, "testWithMockedMapReduceMultiRegion", 10, 1, 10, true); + // It does not matter whether true or false is given to setLocalityEnabledTo, + // because it is not read in testWithMockedMapReduce(). } @Test @@ -165,9 +162,8 @@ public void testWithMapReduceAndOfflineHBaseMultiRegion() throws Exception { public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, String snapshotName, Path tmpTableDir) throws Exception { JobConf job = new JobConf(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, TestTableSnapshotMapper.class, + ImmutableBytesWritable.class, NullWritable.class, job, false, tmpTableDir); } @Override @@ -176,8 +172,7 @@ protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshot throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); JobConf job = new JobConf(util.getConfiguration()); // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that @@ -186,14 +181,13 @@ protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshot Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, - COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } // mapred doesn't support start and end keys? 
o.O @@ -213,7 +207,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected Assert.assertEquals(expectedNumSplits, splits.length); HBaseTestingUtility.SeenRowTracker rowTracker = - new HBaseTestingUtility.SeenRowTracker(startRow, stopRow); + new HBaseTestingUtility.SeenRowTracker(startRow, stopRow); // SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY is not explicitly specified, // so the default value is taken. @@ -226,7 +220,7 @@ private void verifyWithMockedMapReduce(JobConf job, int numRegions, int expected if (localityEnabled) { // When localityEnabled is true, meant to verify split.getLocations() // by the following statement: - // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); + // Assert.assertTrue(split.getLocations() != null && split.getLocations().length != 0); // However, getLocations() of some splits could return an empty array (length is 0), // so drop the verification on length. // TODO: investigate how to verify split.getLocations() when localityEnabled is true @@ -266,9 +260,9 @@ protected void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableNa // this is also called by the IntegrationTestTableSnapshotInputFormat public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName, String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions, - int numSplitsPerRegion,int expectedNumSplits, boolean shutdownCluster) throws Exception { + int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception { - //create the table and snapshot + // create the table and snapshot createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions); if (shutdownCluster) { @@ -283,15 +277,14 @@ public static void doTestWithMapReduce(HBaseTestingUtility util, TableName table org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(jobConf, TestTableSnapshotInputFormat.class); - if(numSplitsPerRegion > 1) { + if (numSplitsPerRegion > 1) { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS, - TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, jobConf, true, tableDir); + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, jobConf, + true, tableDir); } jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java index 8df7a6c74700..f02c13c8f63a 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/MultiTableInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; @@ -79,8 +78,7 @@ public static void setUpBeforeClass() throws Exception { // create and fill table for (String tableName : TABLES) { try (Table table = - TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), - INPUT_FAMILY, 4)) { + TEST_UTIL.createMultiRegionTable(TableName.valueOf(tableName), INPUT_FAMILY, 4)) { TEST_UTIL.loadTable(table, INPUT_FAMILY, false); } } @@ -100,11 +98,10 @@ public void tearDown() throws Exception { /** * Pass the key and value to reducer. */ - public static class ScanMapper extends - TableMapper { + public static class ScanMapper + extends TableMapper { /** * Pass the key and value to reduce. - * * @param key The key, here "aaa", "aab" etc. * @param value The value is the same as the key. * @param context The task context. @@ -121,15 +118,13 @@ public void makeAssertions(ImmutableBytesWritable key, Result value) throws IOEx if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> cf = - value.getMap(); + Map>> cf = value.getMap(); if (!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } String val = Bytes.toStringBinary(value.getValue(INPUT_FAMILY, null)); - LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> " + val); + LOG.debug("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); } } @@ -137,16 +132,13 @@ public void makeAssertions(ImmutableBytesWritable key, Result value) throws IOEx * Checks the last and first keys seen against the scanner boundaries. 
*/ public static class ScanReducer - extends - Reducer { + extends Reducer { private String first = null; private String last = null; @Override - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException, InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { makeAssertions(key, values); } @@ -155,8 +147,8 @@ protected void makeAssertions(ImmutableBytesWritable key, int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.debug("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.debug( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; @@ -165,8 +157,7 @@ protected void makeAssertions(ImmutableBytesWritable key, } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); cleanup(c); } @@ -174,10 +165,8 @@ protected void cleanup(Context context) throws IOException, protected void cleanup(Configuration c) { String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); - LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + - startRow + "\""); - LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + - "\""); + LOG.info("cleanup: first -> \"" + first + "\", start row -> \"" + startRow + "\""); + LOG.info("cleanup: last -> \"" + last + "\", last row -> \"" + lastRow + "\""); if (startRow != null && startRow.length() > 0) { assertEquals(startRow, first); } @@ -188,41 +177,38 @@ protected void cleanup(Configuration c) { } @Test - public void testScanEmptyToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, null, null); } @Test - public void testScanEmptyToAPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanEmptyToAPP() + throws IOException, InterruptedException, ClassNotFoundException { testScan(null, "app", "apo"); } @Test - public void testScanOBBToOPP() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanOBBToOPP() throws IOException, InterruptedException, ClassNotFoundException { testScan("obb", "opp", "opo"); } @Test - public void testScanYZYToEmpty() throws IOException, InterruptedException, - ClassNotFoundException { + public void testScanYZYToEmpty() + throws IOException, InterruptedException, ClassNotFoundException { testScan("yzy", null, "zzz"); } /** * Tests a MR scan using specific start and stop rows. - * * @throws IOException * @throws ClassNotFoundException * @throws InterruptedException */ private void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = - "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? 
stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); @@ -268,5 +254,4 @@ protected void runJob(String jobName, Configuration c, List scans) protected abstract void initJob(List scans, Job job) throws IOException; - } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java index e022bfdbd494..091bc2197830 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/NMapInputFormat.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +21,6 @@ import java.io.DataOutput; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; @@ -33,17 +31,16 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext; /** - * Input format that creates a configurable number of map tasks - * each provided with a single row of NullWritables. This can be - * useful when trying to write mappers which don't have any real - * input (eg when the mapper is simply producing random data as output) + * Input format that creates a configurable number of map tasks each provided with a single row of + * NullWritables. This can be useful when trying to write mappers which don't have any real input + * (eg when the mapper is simply producing random data as output) */ public class NMapInputFormat extends InputFormat { private static final String NMAPS_KEY = "nmapinputformat.num.maps"; @Override - public RecordReader createRecordReader( - InputSplit split, TaskAttemptContext tac) { + public RecordReader createRecordReader(InputSplit split, + TaskAttemptContext tac) { return new SingleRecordReader<>(NullWritable.get(), NullWritable.get()); } @@ -85,8 +82,7 @@ public void write(DataOutput out) { } } - private static class SingleRecordReader - extends RecordReader { + private static class SingleRecordReader extends RecordReader { private final K key; private final V value; @@ -107,7 +103,7 @@ public K getCurrentKey() { } @Override - public V getCurrentValue(){ + public V getCurrentValue() { return value; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java index 83f8e0c120fa..b56feb68cb52 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TableSnapshotInputFormatTestBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -53,7 +53,7 @@ public abstract class TableSnapshotInputFormatTestBase { private static final Logger LOG = LoggerFactory.getLogger(TableSnapshotInputFormatTestBase.class); protected final HBaseTestingUtility UTIL = new HBaseTestingUtility(); protected static final int NUM_REGION_SERVERS = 2; - protected static final byte[][] FAMILIES = {Bytes.toBytes("f1"), Bytes.toBytes("f2")}; + protected static final byte[][] FAMILIES = { Bytes.toBytes("f1"), Bytes.toBytes("f2") }; protected FileSystem fs; protected Path rootDir; @@ -61,9 +61,9 @@ public abstract class TableSnapshotInputFormatTestBase { @Before public void setupCluster() throws Exception { setupConf(UTIL.getConfiguration()); - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numRegionServers(NUM_REGION_SERVERS).numDataNodes(NUM_REGION_SERVERS) - .createRootDir(true).build(); + StartMiniClusterOption option = + StartMiniClusterOption.builder().numRegionServers(NUM_REGION_SERVERS) + .numDataNodes(NUM_REGION_SERVERS).createRootDir(true).build(); UTIL.startMiniCluster(option); rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); fs = rootDir.getFileSystem(UTIL.getConfiguration()); @@ -80,12 +80,12 @@ private static void setupConf(Configuration conf) { } protected abstract void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) - throws Exception; + int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) + throws Exception; protected abstract void testWithMapReduceImpl(HBaseTestingUtility util, TableName tableName, - String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, - int expectedNumSplits, boolean shutdownCluster) throws Exception; + String snapshotName, Path tableDir, int numRegions, int numSplitsPerRegion, + int expectedNumSplits, boolean shutdownCluster) throws Exception; protected abstract byte[] getStartRow(); @@ -128,7 +128,7 @@ public void testRestoreSnapshotDoesNotCreateBackRefLinks() throws Exception { Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName,tmpTableDir); + testRestoreSnapshotDoesNotCreateBackRefLinksInit(tableName, snapshotName, tmpTableDir); Path rootDir = CommonFSUtils.getRootDir(UTIL.getConfiguration()); for (Path regionDir : FSUtils.getRegionDirs(fs, @@ -169,32 +169,31 @@ protected void testWithMapReduce(HBaseTestingUtility util, String snapshotName, } protected static void verifyRowFromMap(ImmutableBytesWritable key, Result result) - throws IOException { + throws IOException { byte[] row = key.get(); CellScanner scanner = result.cellScanner(); while (scanner.advance()) { Cell cell = scanner.current(); - //assert that all Cells in the Result have the same key - Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, - cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())); + // assert that all Cells in the Result have the same key + Assert.assertEquals(0, Bytes.compareTo(row, 0, row.length, cell.getRowArray(), + cell.getRowOffset(), cell.getRowLength())); } for (byte[] family : FAMILIES) { byte[] actual = result.getValue(family, family); - Assert.assertArrayEquals( - "Row in snapshot does not match, expected:" + Bytes.toString(row) + " ,actual:" + Bytes - .toString(actual), row, actual); + Assert.assertArrayEquals("Row in 
snapshot does not match, expected:" + Bytes.toString(row) + + " ,actual:" + Bytes.toString(actual), + row, actual); } } protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName, - String snapshotName, byte[] startRow, byte[] endRow, int numRegions) - throws Exception { + String snapshotName, byte[] startRow, byte[] endRow, int numRegions) throws Exception { try { LOG.debug("Ensuring table doesn't exist."); util.deleteTable(tableName); - } catch(Exception ex) { + } catch (Exception ex) { // ignore } @@ -214,8 +213,8 @@ protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName FileSystem fs = rootDir.getFileSystem(util.getConfiguration()); LOG.info("snapshot"); - SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, - Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true); + SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName, Arrays.asList(FAMILIES), null, + snapshotName, rootDir, fs, true); LOG.info("load different values"); byte[] value = Bytes.toBytes("after_snapshot_value"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java index 0a7a9309899b..a8562fb3b3a5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedHFileOutputFormat2.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -111,13 +110,12 @@ import org.slf4j.LoggerFactory; /** - * Simple test for {@link HFileOutputFormat2}. - * Sets up and runs a mapreduce job that writes hfile output. - * Creates a few inner classes to implement splits and an inputformat that - * emits keys and values like those of {@link PerformanceEvaluation}. + * Simple test for {@link HFileOutputFormat2}. Sets up and runs a mapreduce job that writes hfile + * output. Creates a few inner classes to implement splits and an inputformat that emits keys and + * values like those of {@link PerformanceEvaluation}. 
*/ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) -public class TestCellBasedHFileOutputFormat2 { +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +public class TestCellBasedHFileOutputFormat2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -126,10 +124,10 @@ public class TestCellBasedHFileOutputFormat2 { private final static int ROWSPERSPLIT = 1024; public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME; - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", - "TestTable3").map(TableName::valueOf).toArray(TableName[]::new); + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", "TestTable3") + .map(TableName::valueOf).toArray(TableName[]::new); private HBaseTestingUtility util = new HBaseTestingUtility(); @@ -139,45 +137,39 @@ public class TestCellBasedHFileOutputFormat2 { * Simple mapper that makes KeyValue output. */ static class RandomKVGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; - private static final int KEYLEN_DEFAULT=10; - private static final String KEYLEN_CONF="randomkv.key.length"; + private static final int KEYLEN_DEFAULT = 10; + private static final String KEYLEN_CONF = "randomkv.key.length"; private int valLength; - private static final int VALLEN_DEFAULT=10; - private static final String VALLEN_CONF="randomkv.val.length"; - private static final byte [] QUALIFIER = Bytes.toBytes("data"); + private static final int VALLEN_DEFAULT = 10; + private static final String VALLEN_CONF = "randomkv.val.length"; + private static final byte[] QUALIFIER = Bytes.toBytes("data"); private boolean multiTableMapper = false; private TableName[] tables = null; - @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException ,InterruptedException - { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -210,8 +202,7 @@ protected void map( * Simple mapper that makes Put output. 
*/ static class RandomPutGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; private static final int KEYLEN_DEFAULT = 10; @@ -225,28 +216,25 @@ static class RandomPutGeneratingMapper private TableName[] tables = null; @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException, InterruptedException { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -294,28 +282,27 @@ private void setupRandomGeneratorMapper(Job job, boolean putSortReducer) { } /** - * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if - * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. + * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if passed a keyvalue whose + * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test - public void test_LATEST_TIMESTAMP_isReplaced() - throws Exception { + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test + public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); + Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be - // changed by call to write. Check all in kv is same but ts. + // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be + // changed by call to write. Check all in kv is same but ts. KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); @@ -326,7 +313,7 @@ public void test_LATEST_TIMESTAMP_isReplaced() assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); - // Test 2. Now test passing a kv that has explicit ts. It should not be + // Test 2. Now test passing a kv that has explicit ts. It should not be // changed by call to record write. 
kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); @@ -338,26 +325,25 @@ public void test_LATEST_TIMESTAMP_isReplaced() } } - private TaskAttemptContext createTestTaskAttemptContext(final Job job) - throws Exception { + private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); + TaskAttemptContext context = + hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0"); return context; } /* - * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE - * metadata used by time-restricted scans. + * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by + * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_TIMERANGE_present"); - LOG.info("Timerange dir writing to dir: "+ dir); + Path dir = util.getDataTestDir("test_TIMERANGE_present"); + LOG.info("Timerange dir writing to dir: " + dir); try { // build a record writer using HFileOutputFormat2 Job job = new Job(conf); @@ -367,13 +353,13 @@ public void test_TIMERANGE() throws Exception { writer = hof.getRecordWriter(context); // Pass two key values with explicit times stamps - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); // value 1 with timestamp 2000 KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); - assertEquals(original,kv); + assertEquals(original, kv); // value 2 with timestamp 1000 kv = new KeyValue(b, b, b, 1000, b); @@ -395,14 +381,13 @@ public void test_TIMERANGE() throws Exception { // open as HFile Reader and pull out TIMERANGE FileInfo. HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); - Map finfo = rd.getHFileInfo(); + Map finfo = rd.getHFileInfo(); byte[] range = finfo.get("TIMERANGE".getBytes("UTF-8")); assertNotNull(range); // unmarshall and check values. TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range); - LOG.info(timeRangeTracker.getMin() + - "...." + timeRangeTracker.getMax()); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); assertEquals(1000, timeRangeTracker.getMin()); assertEquals(2000, timeRangeTracker.getMax()); rd.close(); @@ -415,7 +400,8 @@ public void test_TIMERANGE() throws Exception { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testWritingPEData() throws Exception { Configuration conf = util.getConfiguration(); Path testDir = util.getDataTestDirOnTestFS("testWritingPEData"); @@ -433,8 +419,8 @@ public void testWritingPEData() throws Exception { byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; - Arrays.fill(startKey, (byte)0); - Arrays.fill(endKey, (byte)0xff); + Arrays.fill(startKey, (byte) 0); + Arrays.fill(endKey, (byte) 0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); // Set start and end rows for partitioner. @@ -444,29 +430,26 @@ public void testWritingPEData() throws Exception { job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); FileOutputFormat.setOutputPath(job, testDir); assertTrue(job.waitForCompletion(false)); - FileStatus [] files = fs.listStatus(testDir); + FileStatus[] files = fs.listStatus(testDir); assertTrue(files.length > 0); } /** - * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into - * hfile. + * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into hfile. */ @Test - public void test_WritingTagData() - throws Exception { + public void test_WritingTagData() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version"; conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("WritingTagData"); + Path dir = util.getDataTestDir("WritingTagData"); try { conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString()); // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs @@ -476,9 +459,9 @@ public void test_WritingTagData() context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList<>(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -486,15 +469,15 @@ public void test_WritingTagData() writer = null; FileSystem fs = dir.getFileSystem(conf); RemoteIterator iterator = fs.listFiles(dir, true); - while(iterator.hasNext()) { + while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); scanner.seekTo(); Cell cell = scanner.getCell(); - List tagsFromCell = TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), - cell.getTagsLength()); + List tagsFromCell = + TagUtil.asList(cell.getTagsArray(), cell.getTagsOffset(), cell.getTagsLength()); assertTrue(tagsFromCell.size() > 0); for (Tag tag : tagsFromCell) { assertTrue(tag.getType() == 
TagType.TTL_TAG_TYPE); @@ -506,11 +489,12 @@ public void test_WritingTagData() } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); - conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") - .toString()); + conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table = Mockito.mock(Table.class); @@ -521,14 +505,14 @@ public void testJobConfiguration() throws Exception { assertEquals(job.getNumReduceTasks(), 4); } - private byte [][] generateRandomStartKeys(int numKeys) { + private byte[][] generateRandomStartKeys(int numKeys) { Random random = new Random(); byte[][] ret = new byte[numKeys][]; // first region start key is always empty ret[0] = HConstants.EMPTY_BYTE_ARRAY; for (int i = 1; i < numKeys; i++) { ret[i] = - PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); + PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); } return ret; } @@ -543,34 +527,37 @@ private byte[][] generateRandomSplitKeys(int numKeys) { return ret; } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoad() throws Exception { LOG.info("\nStarting test testMRIncrementalLoad\n"); doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad"); } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithSplit() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n"); doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit"); } /** - * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true - * This test could only check the correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY - * is set to true. Because MiniHBaseCluster always run with single hostname (and different ports), - * it's not possible to check the region locality by comparing region locations and DN hostnames. - * When MiniHBaseCluster supports explicit hostnames parameter (just like MiniDFSCluster does), - * we could test region locality features more easily. + * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true This test could only check the + * correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY is set to true. Because + * MiniHBaseCluster always run with single hostname (and different ports), it's not possible to + * check the region locality by comparing region locations and DN hostnames. When MiniHBaseCluster + * supports explicit hostnames parameter (just like MiniDFSCluster does), we could test region + * locality features more easily. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testMRIncrementalLoadWithLocality() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithLocality\n"); doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality1"); doIncrementalLoadTest(true, true, false, "testMRIncrementalLoadWithLocality2"); } - //@Ignore("Wahtevs") + // @Ignore("Wahtevs") @Test public void testMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithPutSortReducer\n"); @@ -578,17 +565,16 @@ public void testMRIncrementalLoadWithPutSortReducer() throws Exception { } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, String tableStr) throws Exception { - doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, - Arrays.asList(tableStr)); + boolean putSortReducer, String tableStr) throws Exception { + doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, + Arrays.asList(tableStr)); } @Test public void testMultiMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMultiMRIncrementalLoadWithPutSortReducer\n"); doIncrementalLoadTest(false, false, true, - Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList - ())); + Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList())); } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, @@ -643,8 +629,7 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe if (allTables.containsKey(tf.getPath().getName())) { ++numTableDirs; tablePath = tf.getPath(); - } - else { + } else { continue; } } @@ -678,9 +663,8 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe byte[][] newSplitKeys = generateRandomSplitKeys(14); Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys); - while (util.getConnection().getRegionLocator(chosenTable.getName()) - .getAllRegionLocations().size() != 15 || - !admin.isTableAvailable(table.getName())) { + while (util.getConnection().getRegionLocator(chosenTable.getName()).getAllRegionLocations() + .size() != 15 || !admin.isTableAvailable(table.getName())) { Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } @@ -696,19 +680,19 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } Table currentTable = allTables.get(tableNameStr); TableName currentTableName = currentTable.getName(); - new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, singleTableInfo - .getRegionLocator()); + new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, + singleTableInfo.getRegionLocator()); // Ensure data shows up int expectedRows = 0; if (putSortReducer) { // no rows should be extracted assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); } else { expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); Scan scan = new Scan(); ResultScanner results = currentTable.getScanner(scan); for (Result res : results) { @@ -741,14 +725,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } 
admin.enableTable(currentTableName); util.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", - tableDigestBefore, util.checksumRows(currentTable)); + assertEquals("Data should remain after reopening of regions", tableDigestBefore, + util.checksumRows(currentTable)); } } finally { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { - tableInfoSingle.getRegionLocator().close(); + tableInfoSingle.getRegionLocator().close(); } - for (Entry singleTable : allTables.entrySet() ) { + for (Entry singleTable : allTables.entrySet()) { singleTable.getValue().close(); util.deleteTable(singleTable.getValue().getName()); } @@ -757,14 +741,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } } - private void runIncrementalPELoad(Configuration conf, List tableInfo, Path outDir, - boolean putSortReducer) throws IOException, - InterruptedException, ClassNotFoundException { + private void runIncrementalPELoad(Configuration conf, + List tableInfo, Path outDir, boolean putSortReducer) + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(conf, "testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad")); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - CellSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + CellSerialization.class.getName()); setupRandomGeneratorMapper(job, putSortReducer); if (tableInfo.size() > 1) { MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfo); @@ -773,30 +757,28 @@ private void runIncrementalPELoad(Configuration conf, List retrievedFamilyToCompressionMap = HFileOutputFormat2 - .createFamilyCompressionMap(conf); + Map retrievedFamilyToCompressionMap = + HFileOutputFormat2.createFamilyCompressionMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -827,21 +807,17 @@ private void setupMockColumnFamiliesForCompression(Table table, Map familyToCompression) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToCompression.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setCompressionType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setCompressionType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForCompression (int numCfs) { + private Map getMockColumnFamiliesForCompression(int numCfs) { Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { @@ -859,40 +835,34 @@ private void setupMockColumnFamiliesForCompression(Table table, return familyToCompression; } - /** * Test for {@link HFileOutputFormat2#configureBloomType(HTableDescriptor, Configuration)} and - * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. - * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * + * {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the compression + * map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBloomType = - getMockColumnFamiliesForBloomType(numCfs); + Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBloomType(table, - familyToBloomType); + setupMockColumnFamiliesForBloomType(table, familyToBloomType); conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, - Arrays.asList(table.getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBloomTypeMap = - HFileOutputFormat2 - .createFamilyBloomTypeMap(conf); + HFileOutputFormat2.createFamilyBloomTypeMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -901,29 +871,24 @@ private void setupMockColumnFamiliesForBloomType(Table table, Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBloomFilterType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBloomFilterType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. 
Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBloomType (int numCfs) { + private Map getMockColumnFamiliesForBloomType(int numCfs) { Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); } if (numCfs-- > 0) { - familyToBloomType.put("Family2=asdads&!AASD", - BloomType.ROWCOL); + familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL); } if (numCfs-- > 0) { familyToBloomType.put("Family3", BloomType.NONE); @@ -933,39 +898,32 @@ private void setupMockColumnFamiliesForBloomType(Table table, /** * Test for {@link HFileOutputFormat2#configureBlockSize(HTableDescriptor, Configuration)} and - * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. - * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * + * {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the compression + * map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBlockSize = - getMockColumnFamiliesForBlockSize(numCfs); + Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBlockSize(table, - familyToBlockSize); + setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table - .getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBlockSizeMap = - HFileOutputFormat2 - .createFamilyBlockSizeMap(conf); + HFileOutputFormat2.createFamilyBlockSizeMap(conf); // test that we have a value for all column families that matches with the // used mock values - for (Entry entry : familyToBlockSize.entrySet() - ) { - assertEquals("BlockSize configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); + for (Entry entry : familyToBlockSize.entrySet()) { + assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -974,33 +932,27 @@ private void setupMockColumnFamiliesForBlockSize(Table table, Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBlocksize(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + 
mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBlocksize(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBlockSize (int numCfs) { + private Map getMockColumnFamiliesForBlockSize(int numCfs) { Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { familyToBlockSize.put("Family3", 0); @@ -1010,39 +962,36 @@ private void setupMockColumnFamiliesForBlockSize(Table table, /** * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)} - * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. - * Tests that the compression map is correctly serialized into - * and deserialized from configuration - * + * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that the + * compression map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = getMockColumnFamiliesForDataBlockEncoding(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForDataBlockEncoding(table, - familyToDataBlockEncoding); + setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); HTableDescriptor tableDescriptor = table.getTableDescriptor(); conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.dataBlockEncodingDetails, Arrays - .asList(tableDescriptor))); + HFileOutputFormat2.serializeColumnFamilyAttribute( + HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToDataBlockEncodingMap = - HFileOutputFormat2 - .createFamilyDataBlockEncodingMap(conf); + HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals("DataBlockEncoding configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals( + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -1051,33 +1000,27 @@ private void setupMockColumnFamiliesForDataBlockEncoding(Table table, Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setDataBlockEncoding(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setDataBlockEncoding(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForDataBlockEncoding (int numCfs) { + private Map getMockColumnFamiliesForDataBlockEncoding(int numCfs) { Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.FAST_DIFF); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.PREFIX); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX); } if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE); @@ -1086,12 +1029,8 @@ private void setupMockColumnFamiliesForDataBlockEncoding(Table table, } private void setupMockStartKeys(RegionLocator table) throws IOException { - byte[][] mockKeys = new byte[][] { - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaa"), - Bytes.toBytes("ggg"), - Bytes.toBytes("zzz") - }; + byte[][] mockKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaa"), + Bytes.toBytes("ggg"), Bytes.toBytes("zzz") }; Mockito.doReturn(mockKeys).when(table).getStartKeys(); } @@ -1101,10 +1040,11 @@ private void setupMockTableName(RegionLocator table) throws IOException { } /** - * Test that {@link HFileOutputFormat2} RecordWriter uses compression and - * bloom filter settings from the column family descriptor + * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings + * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; @@ -1116,7 +1056,7 @@ public void testColumnFamilySettings() throws Exception { RegionLocator regionLocator = Mockito.mock(RegionLocator.class); HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]); Mockito.doReturn(htd).when(table).getTableDescriptor(); - for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) { + for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) { htd.addFamily(hcd); } @@ -1164,11 +1104,12 @@ public void testColumnFamilySettings() throws Exception { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals("Incorrect bloom filter used for column family " + familyStr + - "(reader: " + reader + ")", + assertEquals( + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); - assertEquals("Incorrect compression used for column family " + familyStr + - "(reader: " + reader + ")", hcd.getCompressionType(), reader.getFileContext().getCompression()); + assertEquals( + "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", + hcd.getCompressionType(), reader.getFileContext().getCompression()); } } finally { dir.getFileSystem(conf).delete(dir, true); @@ -1176,8 +1117,8 @@ public void testColumnFamilySettings() throws Exception { } /** - * Write random values to the writer assuming a table created using - * {@link #FAMILIES} as column family descriptors + * Write random values to the writer assuming a table created using {@link #FAMILIES} as column + * family descriptors */ private void writeRandomKeyValues(RecordWriter writer, TaskAttemptContext context, Set families, int numRows) @@ -1188,7 +1129,7 @@ private void writeRandomKeyValues(RecordWriter wri int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - final byte [] qualifier = Bytes.toBytes("data"); + final byte[] qualifier = Bytes.toBytes("data"); Random random = new Random(); for (int i = 0; i < numRows; i++) { @@ -1204,12 +1145,12 @@ private void writeRandomKeyValues(RecordWriter wri } /** - * This test is to test the scenario happened in HBASE-6901. - * All files are bulk loaded and excluded from minor compaction. - * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException - * will be thrown. + * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and + * excluded from minor compaction. Without the fix of HBASE-6901, an + * ArrayIndexOutOfBoundsException will be thrown. 
*/ - @Ignore ("Flakey: See HBASE-9051") @Test + @Ignore("Flakey: See HBASE-9051") + @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1224,28 +1165,29 @@ public void testExcludeAllFromMinorCompaction() throws Exception { assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), - new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), - Bytes.toString(FAMILIES[0]))); + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), + Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // Generate two bulk load files - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), + conn.getRegionLocator(TABLE_NAMES[0]))), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator); } // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1290,7 +1232,8 @@ public Boolean call() throws Exception { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1298,17 +1241,17 @@ public void testExcludeMinorCompaction() throws Exception { util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()){ + Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), - new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), - Bytes.toString(FAMILIES[0]))); + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), + Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // put some data in it and flush to create a storefile @@ -1325,20 +1268,20 @@ public Boolean call() throws Exception { }, 5000); // Generate a bulk load file with more rows - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), regionLocator)), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), regionLocator)), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator); // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows + 1, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows + 1, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1396,16 +1339,17 @@ public void manualTest(String args[]) throws Exception { Table table = util.createTable(tname, FAMILIES, splitKeys); } else if ("incremental".equals(args[0])) { TableName tname = TableName.valueOf(args[1]); - try(Connection c = ConnectionFactory.createConnection(conf); + try (Connection c = ConnectionFactory.createConnection(conf); Admin admin = c.getAdmin(); RegionLocator regionLocator = c.getRegionLocator(tname)) { Path outDir = new Path("incremental-out"); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin - .getTableDescriptor(tname), regionLocator)), outDir, false); + runIncrementalPELoad(conf, + Arrays.asList( + new HFileOutputFormat2.TableInfo(admin.getTableDescriptor(tname), regionLocator)), + outDir, false); } } else { - throw new RuntimeException( - "usage: TestHFileOutputFormat2 newtable | incremental"); + throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental"); } } @@ -1415,9 +1359,10 @@ public void testBlockStoragePolicy() throws Exception { Configuration conf = util.getConfiguration(); 
conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD"); - conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + - Bytes.toString(HFileOutputFormat2.combineTableNameSuffix( - TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD"); + conf.set( + HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString( + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])), + "ONE_SSD"); Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0])); Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1])); util.startMiniDFSCluster(3); @@ -1436,9 +1381,9 @@ public void testBlockStoragePolicy() throws Exception { // alter table cf schema to change storage policies HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); spA = getStoragePolicyName(fs, cf1Dir); spB = getStoragePolicyName(fs, cf2Dir); LOG.debug("Storage policy of cf 0: [" + spA + "]."); @@ -1493,4 +1438,3 @@ private String getStoragePolicyNameForOldHDFSVersion(FileSystem fs, Path path) { return null; } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java index 954454613648..48199ef59d83 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedImportExport2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -94,7 +94,7 @@ /** * Tests the table import and table export MR job functionality */ -@Category({VerySlowMapReduceTests.class, MediumTests.class}) +@Category({ VerySlowMapReduceTests.class, MediumTests.class }) public class TestCellBasedImportExport2 { @ClassRule @@ -125,7 +125,7 @@ public static void beforeClass() throws Throwable { UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); UTIL.startMiniCluster(); FQ_OUTPUT_DIR = - new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); + new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); } @AfterClass @@ -209,48 +209,43 @@ public void testSimpleCase() throws Throwable { t.put(p); } - String[] args = new String[] { - // Only export row1 & row2. - "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", - "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export - }; - assertTrue(runExport(args)); + String[] args = new String[] { + // Only export row1 & row2. 
+ "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", + "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", name.getMethodName(), FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); - final String IMPORT_TABLE = name.getMethodName() + "import"; - try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { - args = new String[] { - "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; - assertTrue(runImport(args)); - - Get g = new Get(ROW1); - g.setMaxVersions(); - Result r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW2); - g.setMaxVersions(); - r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW3); - r = t.get(g); - assertEquals(0, r.size()); - } + final String IMPORT_TABLE = name.getMethodName() + "import"; + try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { + args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + IMPORT_TABLE, FQ_OUTPUT_DIR }; + assertTrue(runImport(args)); + + Get g = new Get(ROW1); + g.setMaxVersions(); + Result r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW2); + g.setMaxVersions(); + r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW3); + r = t.get(g); + assertEquals(0, r.size()); + } } /** * Test export hbase:meta table - * * @throws Throwable */ @Test public void testMetaExport() throws Throwable { - String[] args = new String[] { TableName.META_TABLE_NAME.getNameAsString(), - FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = + new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } @@ -274,34 +269,26 @@ public void testImport94Table() throws Throwable { fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE = name; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) { - String[] args = new String[] { - "-Dhbase.import.version=0.94" , - IMPORT_TABLE, FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-Dhbase.import.version=0.94", IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - /* exportedTableIn94Format contains 5 rows - ROW COLUMN+CELL - r1 column=f1:c1, timestamp=1383766761171, value=val1 - r2 column=f1:c1, timestamp=1383766771642, value=val2 - r3 column=f1:c1, timestamp=1383766777615, value=val3 - r4 column=f1:c1, timestamp=1383766785146, value=val4 - r5 column=f1:c1, timestamp=1383766791506, value=val5 - */ - assertEquals(5, UTIL.countRows(t)); + /* + * exportedTableIn94Format contains 5 rows ROW COLUMN+CELL r1 column=f1:c1, + * timestamp=1383766761171, value=val1 r2 column=f1:c1, timestamp=1383766771642, value=val2 r3 + * column=f1:c1, timestamp=1383766777615, value=val3 r4 column=f1:c1, timestamp=1383766785146, + * value=val4 r5 column=f1:c1, timestamp=1383766791506, value=val5 + */ + assertEquals(5, UTIL.countRows(t)); } } /** * Test export scanner batching */ - @Test - public void testExportScannerBatching() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(1) - .build()) - .build(); + @Test + public void testExportScannerBatching() throws Throwable { + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( + 
ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(1).build()).build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -313,11 +300,11 @@ public void testExportScannerBatching() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - String[] args = new String[] { - "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg. - name.getMethodName(), - FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added + // scanner + // batching + // arg. + name.getMethodName(), FQ_OUTPUT_DIR }; assertTrue(runExport(args)); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); @@ -327,12 +314,10 @@ public void testExportScannerBatching() throws Throwable { @Test public void testWithDeletes() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -345,35 +330,26 @@ public void testWithDeletes() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - Delete d = new Delete(ROW1, now+3); + Delete d = new Delete(ROW1, now + 3); t.delete(d); d = new Delete(ROW1); - d.addColumns(FAMILYA, QUAL, now+2); + d.addColumns(FAMILYA, QUAL, now + 2); t.delete(d); } - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), + FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { - args = new String[] { - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -383,71 +359,60 @@ public void testWithDeletes() throws Throwable { Result r = scanner.next(); Cell[] res = r.rawCells(); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); - assertEquals(now+4, res[1].getTimestamp()); - assertEquals(now+3, res[2].getTimestamp()); + assertEquals(now + 4, res[1].getTimestamp()); + assertEquals(now + 3, res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); - assertEquals(now+2, res[4].getTimestamp()); - assertEquals(now+1, res[5].getTimestamp()); + assertEquals(now + 2, 
res[4].getTimestamp()); + assertEquals(now + 1, res[5].getTimestamp()); assertEquals(now, res[6].getTimestamp()); } } - @Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) .build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); exportT.delete(d); - //Add second version of QUAL + // Add second version of QUAL p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now + 5, "s".getBytes()); exportT.put(p); - //Add second Delete family marker - d = new Delete(ROW1, now+7); + // Add second Delete family marker + d = new Delete(ROW1, now + 7); exportT.delete(d); - - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key + // to export }; assertTrue(runExport(args)); final String importTable = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importT = UTIL.getConnection().getTable(TableName.valueOf(importTable)); - args = new String[] { - importTable, - FQ_OUTPUT_DIR - }; + args = new String[] { importTable, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -458,11 +423,11 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro Result importedTResult = importedTScanner.next(); ResultScanner exportedTScanner = exportT.getScanner(s); - Result exportedTResult = exportedTScanner.next(); + Result exportedTResult = exportedTScanner.next(); try { Result.compareResults(exportedTResult, importedTResult); } catch (Throwable e) { - fail("Original and imported tables data comparision failed with error:"+e.getMessage()); + fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); @@ -470,18 +435,15 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro } /** - * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, + * Create a simple table, run an 
Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. */ @Test public void testWithFilter() throws Throwable { // Create simple table to export - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()).build(); UTIL.getAdmin().createTable(desc); Table exportTable = UTIL.getConnection().getTable(desc.getTableName()); @@ -504,19 +466,14 @@ public void testWithFilter() throws Throwable { // Import to a new table final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()).build(); UTIL.getAdmin().createTable(desc); Table importTable = UTIL.getConnection().getTable(desc.getTableName()); args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(), "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, - FQ_OUTPUT_DIR, - "1000" }; + FQ_OUTPUT_DIR, "1000" }; assertTrue(runImport(args)); // get the count of the source table for that time range @@ -564,7 +521,7 @@ private int getCount(Table table, Filter filter) throws IOException { public void testImportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -594,29 +551,19 @@ public void testExportScan() throws Exception { String prefix = "row"; String label_0 = "label_0"; String label_1 = "label_1"; - String[] args = { - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + String[] args = { "table", "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Scan scan = ExportUtils.getScanFromCommandLine(UTIL.getConfiguration(), args); assertEquals(version, scan.getMaxVersions()); assertEquals(startTime, scan.getTimeRange().getMin()); assertEquals(endTime, scan.getTimeRange().getMax()); assertEquals(true, (scan.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); - String[] argsWithLabels = { - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + assertEquals(0, + Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + String[] argsWithLabels = + { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, "table", + "outputDir", String.valueOf(version), String.valueOf(startTime), + 
String.valueOf(endTime), prefix }; Configuration conf = new Configuration(UTIL.getConfiguration()); // parse the "-D" options String[] otherArgs = new GenericOptionsParser(conf, argsWithLabels).getRemainingArgs(); @@ -625,7 +572,8 @@ public void testExportScan() throws Exception { assertEquals(startTime, scanWithLabels.getTimeRange().getMin()); assertEquals(endTime, scanWithLabels.getTimeRange().getMax()); assertEquals(true, (scanWithLabels.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), + Bytes.toBytesBinary(prefix))); assertEquals(2, scanWithLabels.getAuthorizations().getLabels().size()); assertEquals(label_0, scanWithLabels.getAuthorizations().getLabels().get(0)); assertEquals(label_1, scanWithLabels.getAuthorizations().getLabels().get(1)); @@ -638,7 +586,7 @@ public void testExportScan() throws Exception { public void testExportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -651,11 +599,10 @@ public void testExportMain() throws Throwable { assertEquals(-1, newSecurityManager.getExitCode()); String errMsg = data.toString(); assertTrue(errMsg.contains("Wrong number of arguments:")); - assertTrue(errMsg.contains( - "Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]")); assertTrue( - errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); + errMsg.contains("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]")); + assertTrue(errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); assertTrue(errMsg.contains("-D hbase.mapreduce.include.deleted.rows=true")); assertTrue(errMsg.contains("-D hbase.client.scanner.caching=100")); assertTrue(errMsg.contains("-D hbase.export.scanner.batch=10")); @@ -702,8 +649,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } /** - * Test addFilterAndArguments method of Import This method set couple - * parameters into Configuration + * Test addFilterAndArguments method of Import This method set couple parameters into + * Configuration */ @Test public void testAddFilterAndArguments() throws IOException { @@ -715,7 +662,7 @@ public void testAddFilterAndArguments() throws IOException { Import.addFilterAndArguments(configuration, FilterBase.class, args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase", - configuration.get(Import.FILTER_CLASS_CONF_KEY)); + configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY)); } @@ -739,7 +686,7 @@ public void testDurability() throws Throwable { exportTable.put(put); // Run the export - String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"}; + String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000" }; assertTrue(runExport(args)); // Create the table for import @@ -754,13 +701,12 @@ public void testDurability() throws Throwable { wal.registerWALActionsListener(walListener); // Run the import with SKIP_WAL - args = - new String[] { "-D" + Import.WAL_DURABILITY + "=" + 
Durability.SKIP_WAL.name(), - importTableName, FQ_OUTPUT_DIR }; + args = new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), + importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is not visisted + // Assert that the wal is not visited assertTrue(!walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); // Run the import with the default durability option @@ -773,16 +719,16 @@ public void testDurability() throws Throwable { wal.registerWALActionsListener(walListener); args = new String[] { importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is visisted + // Assert that the wal is visited assertTrue(walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); } } /** - * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to - * identify that an entry is written to the Write Ahead Log for the given table. + * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to identify + * that an entry is written to the Write Ahead Log for the given table. */ private static class TableWALActionListener implements WALActionsListener { @@ -796,7 +742,7 @@ public TableWALActionListener(RegionInfo region) { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { if (logKey.getTableName().getNameAsString().equalsIgnoreCase( - this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { + this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { isVisited = true; } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java index 6e7375295a6e..caab813c1651 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellBasedWALPlayer2.java @@ -1,4 +1,4 @@ -/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements.
See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ /** * Basic test for the WALPlayer M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCellBasedWALPlayer2 { @ClassRule @@ -91,7 +91,7 @@ public class TestCellBasedWALPlayer2 { @BeforeClass public static void beforeClass() throws Exception { - conf= TEST_UTIL.getConfiguration(); + conf = TEST_UTIL.getConfiguration(); rootDir = TEST_UTIL.createRootDir(); walRootDir = TEST_UTIL.createWALRootDir(); fs = CommonFSUtils.getRootDirFileSystem(conf); @@ -134,19 +134,17 @@ public void testWALPlayer() throws Exception { // replay the WAL, map table 1 to table 2 WAL log = cluster.getRegionServer(0).getWAL(null); log.rollWriter(); - String walInputDir = new Path(cluster.getMaster().getMasterFileSystem() - .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString(); + String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getWALRootDir(), + HConstants.HREGION_LOGDIR_NAME).toString(); - Configuration configuration= TEST_UTIL.getConfiguration(); + Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName="_test_.name"; + String optionName = "_test_.name"; configuration.set(optionName, "1000"); player.setupTime(configuration, optionName); - assertEquals(1000,configuration.getLong(optionName,0)); + assertEquals(1000, configuration.getLong(optionName, 0)); assertEquals(0, ToolRunner.run(configuration, player, - new String[] {walInputDir, tableName1.getNameAsString(), - tableName2.getNameAsString() })); - + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); // verify the WAL was player into table 2 Get g = new Get(ROW); @@ -210,7 +208,7 @@ public void testMainMethod() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -223,8 +221,8 @@ public void testMainMethod() throws Exception { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " [ ]")); + assertTrue(data.toString() + .contains("Usage: WALPlayer [options] " + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java index 309ecc81df53..ce3e4e3cfd0b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCellCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,7 +50,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCellCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -66,8 +66,8 @@ public class TestCellCounter { private static final byte[] QUALIFIER = Bytes.toBytes("q"); private static Path FQ_OUTPUT_DIR; - private static final String OUTPUT_DIR = "target" + File.separator + "test-data" + File.separator - + "output"; + private static final String OUTPUT_DIR = + "target" + File.separator + "test-data" + File.separator + "output"; private static long now = EnvironmentEdgeManager.currentTime(); @Rule @@ -87,7 +87,6 @@ public static void afterClass() throws Exception { /** * Test CellCounter all data should print to output - * */ @Test public void testCellCounter() throws Exception { @@ -107,7 +106,7 @@ public void testCellCounter() throws Exception { String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1" }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -143,7 +142,7 @@ public void testCellCounterPrefix() throws Exception { String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "\\x01row1" }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -177,10 +176,10 @@ public void testCellCounterStartTimeRange() throws Exception { p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1", - "--starttime=" + now, "--endtime=" + now + 2 }; + "--starttime=" + now, "--endtime=" + now + 2 }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -214,10 +213,10 @@ public void testCellCounteEndTimeRange() throws Exception { p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "^row1", - "--endtime=" + now + 1 }; + "--endtime=" + now + 1 }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); assertTrue(data.contains("Total Families Across all Rows" + "\t" + "2")); @@ -250,13 +249,12 @@ public void testCellCounteOutOfTimeRange() throws Exception { p.addColumn(FAMILY_A, QUALIFIER, now + 1, Bytes.toBytes("Data22")); p.addColumn(FAMILY_B, QUALIFIER, now + 2, Bytes.toBytes("Data23")); t.put(p); - String[] args = - { 
sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", "--starttime=" + now + 1, - "--endtime=" + now + 2 }; + String[] args = { sourceTable.getNameAsString(), FQ_OUTPUT_DIR.toString(), ";", + "--starttime=" + now + 1, "--endtime=" + now + 2 }; runCount(args); FileInputStream inputStream = - new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); + new FileInputStream(OUTPUT_DIR + File.separator + "part-r-00000"); String data = IOUtils.toString(inputStream); inputStream.close(); // nothing should hace been emitted to the reducer @@ -269,8 +267,8 @@ public void testCellCounteOutOfTimeRange() throws Exception { private boolean runCount(String[] args) throws Exception { // need to make a copy of the configuration because to make sure // different temp dirs are used. - int status = ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), - args); + int status = + ToolRunner.run(new Configuration(UTIL.getConfiguration()), new CellCounter(), args); return status == 0; } @@ -282,7 +280,7 @@ public void testCellCounterMain() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -314,9 +312,8 @@ public void testCellCounterForCompleteTable() throws Exception { final TableName sourceTable = TableName.valueOf(name.getMethodName()); String outputPath = OUTPUT_DIR + sourceTable; LocalFileSystem localFileSystem = new LocalFileSystem(); - Path outputDir = - new Path(outputPath).makeQualified(localFileSystem.getUri(), - localFileSystem.getWorkingDirectory()); + Path outputDir = new Path(outputPath).makeQualified(localFileSystem.getUri(), + localFileSystem.getWorkingDirectory()); byte[][] families = { FAMILY_A, FAMILY_B }; Table t = UTIL.createTable(sourceTable, families); try { @@ -348,7 +345,7 @@ public void testCellCounterForCompleteTable() throws Exception { FileUtil.fullyDelete(new File(outputPath)); args = new String[] { "-D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=a, b", - sourceTable.getNameAsString(), outputDir.toString(), ";"}; + sourceTable.getNameAsString(), outputDir.toString(), ";" }; runCount(args); inputStream = new FileInputStream(outputPath + File.separator + "part-r-00000"); String data2 = IOUtils.toString(inputStream); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java index 0271983428a7..7d9406259339 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestCopyTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ /** * Basic test for the CopyTable M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestCopyTable { @ClassRule @@ -95,20 +95,19 @@ private void doCopyTableTest(boolean bulkload) throws Exception { final byte[] COLUMN1 = Bytes.toBytes("c1"); try (Table t1 = TEST_UTIL.createTable(tableName1, FAMILY); - Table t2 = TEST_UTIL.createTable(tableName2, FAMILY);) { + Table t2 = TEST_UTIL.createTable(tableName2, FAMILY);) { // put rows into the first table loadData(t1, FAMILY, COLUMN1); CopyTable copy = new CopyTable(); int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", + tableName1.getNameAsString() }); } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -130,15 +129,13 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { cfd.setMobEnabled(true); cfd.setMobThreshold(5); - TableDescriptor desc1 = TableDescriptorBuilder.newBuilder(tableName1) - .setColumnFamily(cfd.build()) - .build(); - TableDescriptor desc2 = TableDescriptorBuilder.newBuilder(tableName2) - .setColumnFamily(cfd.build()) - .build(); + TableDescriptor desc1 = + TableDescriptorBuilder.newBuilder(tableName1).setColumnFamily(cfd.build()).build(); + TableDescriptor desc2 = + TableDescriptorBuilder.newBuilder(tableName2).setColumnFamily(cfd.build()).build(); try (Table t1 = TEST_UTIL.createTable(desc1, null); - Table t2 = TEST_UTIL.createTable(desc2, null);) { + Table t2 = TEST_UTIL.createTable(desc2, null);) { // put rows into the first table for (int i = 0; i < 10; i++) { @@ -151,13 +148,12 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { int code; if (bulkload) { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - "--bulkload", tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2.getNameAsString(), "--bulkload", + tableName1.getNameAsString() }); } else { - code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2.getNameAsString(), - tableName1.getNameAsString() }); + code = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, new String[] { + "--new.name=" + tableName2.getNameAsString(), tableName1.getNameAsString() }); } assertEquals("copy job failed", 0, code); @@ -168,17 +164,15 @@ private void doCopyTableTestWithMob(boolean bulkload) throws Exception { assertEquals(1, r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); assertEquals("compare row values between two tables", - t1.getDescriptor().getValue("row" + i), - t2.getDescriptor().getValue("row" 
+ i)); + t1.getDescriptor().getValue("row" + i), t2.getDescriptor().getValue("row" + i)); } - assertEquals("compare count of mob rows after table copy", MobTestUtil.countMobRows(TEST_UTIL, t1), - MobTestUtil.countMobRows(TEST_UTIL, t2)); + assertEquals("compare count of mob rows after table copy", + MobTestUtil.countMobRows(TEST_UTIL, t1), MobTestUtil.countMobRows(TEST_UTIL, t2)); assertEquals("compare count of mob row values between two tables", - t1.getDescriptor().getValues().size(), - t2.getDescriptor().getValues().size()); + t1.getDescriptor().getValues().size(), t2.getDescriptor().getValues().size()); assertTrue("The mob row count is 0 but should be > 0", - MobTestUtil.countMobRows(TEST_UTIL, t2) > 0); + MobTestUtil.countMobRows(TEST_UTIL, t2) > 0); } finally { TEST_UTIL.deleteTable(tableName1); TEST_UTIL.deleteTable(tableName2); @@ -243,11 +237,10 @@ public void testStartStopRow() throws Exception { t1.put(p); CopyTable copy = new CopyTable(); - assertEquals( - 0, - ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), - copy, new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", - "--stoprow=\\x01row2", tableName1.getNameAsString() })); + assertEquals(0, + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), copy, + new String[] { "--new.name=" + tableName2, "--startrow=\\x01row1", "--stoprow=\\x01row2", + tableName1.getNameAsString() })); // verify the data was copied into table 2 // row1 exist, row0, row2 do not exist @@ -324,7 +317,7 @@ public void testMainMethod() throws Exception { PrintStream writer = new PrintStream(data); System.setErr(writer); SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); try { CopyTable.main(emptyArgs); @@ -341,8 +334,8 @@ public void testMainMethod() throws Exception { } private boolean runCopy(String[] args) throws Exception { - int status = ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), - args); + int status = + ToolRunner.run(new Configuration(TEST_UTIL.getConfiguration()), new CopyTable(), args); return status == 0; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java index 46a449a43599..d1126ce59223 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestGroupingTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestGroupingTableMapper { @ClassRule @@ -60,10 +60,10 @@ public void testGroupingTableMapper() throws Exception { context.write(any(), any()); List keyValue = new ArrayList<>(); byte[] row = {}; - keyValue.add(new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes - .toBytes("value1"))); - keyValue.add(new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes - .toBytes("value2"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family2"), Bytes.toBytes("clm"), Bytes.toBytes("value1"))); + keyValue.add( + new KeyValue(row, Bytes.toBytes("family1"), Bytes.toBytes("clm"), Bytes.toBytes("value2"))); when(result.listCells()).thenReturn(keyValue); mapper.map(null, result, context); // template data diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java index 2467dcace235..d843c0437829 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHBaseMRTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,6 +17,12 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.testclassification.LargeTests; @@ -25,14 +31,7 @@ import org.junit.Test; import org.junit.experimental.categories.Category; -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestHBaseMRTestingUtility { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -55,17 +54,21 @@ public void testMRYarnConfigsPopulation() throws IOException { hbt.getConfiguration().set(entry.getKey(), entry.getValue()); } - for (Map.Entry entry : dummyProps.entrySet()) { - assertTrue("The Configuration for key " + entry.getKey() +" and value: " + entry.getValue() + - " is not populated correctly", hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry entry : dummyProps.entrySet()) { + assertTrue( + "The Configuration for key " + entry.getKey() + " and value: " + entry.getValue() + + " is not populated correctly", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.startMiniMapReduceCluster(); // Confirm that MiniMapReduceCluster overwrites the mr properties and updates the Configuration - for (Map.Entry entry : dummyProps.entrySet()) { - assertFalse("The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini"+ - "cluster is started", 
hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); + for (Map.Entry entry : dummyProps.entrySet()) { + assertFalse( + "The MR prop: " + entry.getValue() + " is not overwritten when map reduce mini" + + "cluster is started", + hbt.getConfiguration().get(entry.getKey()).equals(entry.getValue())); } hbt.shutdownMiniMapReduceCluster(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java index 9b82cbdfc876..4d15e006e12f 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHFileOutputFormat2.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -124,14 +124,13 @@ import org.slf4j.LoggerFactory; /** - * Simple test for {@link HFileOutputFormat2}. - * Sets up and runs a mapreduce job that writes hfile output. - * Creates a few inner classes to implement splits and an inputformat that - * emits keys and values like those of {@link PerformanceEvaluation}. + * Simple test for {@link HFileOutputFormat2}. Sets up and runs a mapreduce job that writes hfile + * output. Creates a few inner classes to implement splits and an inputformat that emits keys and + * values like those of {@link PerformanceEvaluation}. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) -//TODO : Remove this in 3.0 -public class TestHFileOutputFormat2 { +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) +// TODO : Remove this in 3.0 +public class TestHFileOutputFormat2 { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -140,10 +139,10 @@ public class TestHFileOutputFormat2 { private final static int ROWSPERSPLIT = 1024; public static final byte[] FAMILY_NAME = TestHRegionFileSystem.FAMILY_NAME; - private static final byte[][] FAMILIES = { - Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B"))}; - private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", - "TestTable3").map(TableName::valueOf).toArray(TableName[]::new); + private static final byte[][] FAMILIES = + { Bytes.add(FAMILY_NAME, Bytes.toBytes("-A")), Bytes.add(FAMILY_NAME, Bytes.toBytes("-B")) }; + private static final TableName[] TABLE_NAMES = Stream.of("TestTable", "TestTable2", "TestTable3") + .map(TableName::valueOf).toArray(TableName[]::new); private HBaseTestingUtility util = new HBaseTestingUtility(); @@ -153,45 +152,39 @@ public class TestHFileOutputFormat2 { * Simple mapper that makes KeyValue output. 
*/ static class RandomKVGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; - private static final int KEYLEN_DEFAULT=10; - private static final String KEYLEN_CONF="randomkv.key.length"; + private static final int KEYLEN_DEFAULT = 10; + private static final String KEYLEN_CONF = "randomkv.key.length"; private int valLength; - private static final int VALLEN_DEFAULT=10; - private static final String VALLEN_CONF="randomkv.val.length"; - private static final byte [] QUALIFIER = Bytes.toBytes("data"); + private static final int VALLEN_DEFAULT = 10; + private static final String VALLEN_CONF = "randomkv.val.length"; + private static final byte[] QUALIFIER = Bytes.toBytes("data"); private boolean multiTableMapper = false; private TableName[] tables = null; - @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException ,InterruptedException - { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -223,8 +216,7 @@ protected void map( * Simple mapper that makes Put output. */ static class RandomPutGeneratingMapper - extends Mapper { + extends Mapper { private int keyLength; private static final int KEYLEN_DEFAULT = 10; @@ -238,28 +230,25 @@ static class RandomPutGeneratingMapper private TableName[] tables = null; @Override - protected void setup(Context context) throws IOException, - InterruptedException { + protected void setup(Context context) throws IOException, InterruptedException { super.setup(context); Configuration conf = context.getConfiguration(); keyLength = conf.getInt(KEYLEN_CONF, KEYLEN_DEFAULT); valLength = conf.getInt(VALLEN_CONF, VALLEN_DEFAULT); - multiTableMapper = conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, - false); + multiTableMapper = + conf.getBoolean(HFileOutputFormat2.MULTI_TABLE_HFILEOUTPUTFORMAT_CONF_KEY, false); if (multiTableMapper) { tables = TABLE_NAMES; } else { - tables = new TableName[]{TABLE_NAMES[0]}; + tables = new TableName[] { TABLE_NAMES[0] }; } } @Override - protected void map( - NullWritable n1, NullWritable n2, - Mapper.Context context) - throws java.io.IOException, InterruptedException { + protected void map(NullWritable n1, NullWritable n2, + Mapper.Context context) + throws java.io.IOException, InterruptedException { byte keyBytes[] = new byte[keyLength]; byte valBytes[] = new byte[valLength]; @@ -306,28 +295,27 @@ private void setupRandomGeneratorMapper(Job job, boolean putSortReducer) { } /** - * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if - * passed a keyvalue whose timestamp is {@link HConstants#LATEST_TIMESTAMP}. 
+ * Test that {@link HFileOutputFormat2} RecordWriter amends timestamps if passed a keyvalue whose + * timestamp is {@link HConstants#LATEST_TIMESTAMP}. * @see HBASE-2615 */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test - public void test_LATEST_TIMESTAMP_isReplaced() - throws Exception { + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test + public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); + Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be - // changed by call to write. Check all in kv is same but ts. + // Test 1. Pass a KV that has a ts of LATEST_TIMESTAMP. It should be + // changed by call to write. Check all in kv is same but ts. KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); @@ -338,7 +326,7 @@ public void test_LATEST_TIMESTAMP_isReplaced() assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); - // Test 2. Now test passing a kv that has explicit ts. It should not be + // Test 2. Now test passing a kv that has explicit ts. It should not be // changed by call to record write. kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); @@ -350,26 +338,25 @@ public void test_LATEST_TIMESTAMP_isReplaced() } } - private TaskAttemptContext createTestTaskAttemptContext(final Job job) - throws Exception { + private TaskAttemptContext createTestTaskAttemptContext(final Job job) throws Exception { HadoopShims hadoop = CompatibilitySingletonFactory.getInstance(HadoopShims.class); - TaskAttemptContext context = hadoop.createTestTaskAttemptContext( - job, "attempt_201402131733_0001_m_000000_0"); + TaskAttemptContext context = + hadoop.createTestTaskAttemptContext(job, "attempt_201402131733_0001_m_000000_0"); return context; } /* - * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE - * metadata used by time-restricted scans. + * Test that {@link HFileOutputFormat2} creates an HFile with TIMERANGE metadata used by + * time-restricted scans. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("test_TIMERANGE_present"); - LOG.info("Timerange dir writing to dir: "+ dir); + Path dir = util.getDataTestDir("test_TIMERANGE_present"); + LOG.info("Timerange dir writing to dir: " + dir); try { // build a record writer using HFileOutputFormat2 Job job = new Job(conf); @@ -379,13 +366,13 @@ public void test_TIMERANGE() throws Exception { writer = hof.getRecordWriter(context); // Pass two key values with explicit times stamps - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); // value 1 with timestamp 2000 KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); - assertEquals(original,kv); + assertEquals(original, kv); // value 2 with timestamp 1000 kv = new KeyValue(b, b, b, 1000, b); @@ -407,14 +394,13 @@ public void test_TIMERANGE() throws Exception { // open as HFile Reader and pull out TIMERANGE FileInfo. HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), true, conf); - Map finfo = rd.getHFileInfo(); + Map finfo = rd.getHFileInfo(); byte[] range = finfo.get(Bytes.toBytes("TIMERANGE")); assertNotNull(range); // unmarshall and check values. - TimeRangeTracker timeRangeTracker =TimeRangeTracker.parseFrom(range); - LOG.info(timeRangeTracker.getMin() + - "...." + timeRangeTracker.getMax()); + TimeRangeTracker timeRangeTracker = TimeRangeTracker.parseFrom(range); + LOG.info(timeRangeTracker.getMin() + "...." + timeRangeTracker.getMax()); assertEquals(1000, timeRangeTracker.getMin()); assertEquals(2000, timeRangeTracker.getMax()); rd.close(); @@ -427,7 +413,8 @@ public void test_TIMERANGE() throws Exception { /** * Run small MR job. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testWritingPEData() throws Exception { Configuration conf = util.getConfiguration(); Path testDir = util.getDataTestDirOnTestFS("testWritingPEData"); @@ -446,8 +433,8 @@ public void testWritingPEData() throws Exception { byte[] startKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; byte[] endKey = new byte[RandomKVGeneratingMapper.KEYLEN_DEFAULT]; - Arrays.fill(startKey, (byte)0); - Arrays.fill(endKey, (byte)0xff); + Arrays.fill(startKey, (byte) 0); + Arrays.fill(endKey, (byte) 0xff); job.setPartitionerClass(SimpleTotalOrderPartitioner.class); // Set start and end rows for partitioner. @@ -457,49 +444,46 @@ public void testWritingPEData() throws Exception { job.setOutputFormatClass(HFileOutputFormat2.class); job.setNumReduceTasks(4); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - KeyValueSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + KeyValueSerialization.class.getName()); FileOutputFormat.setOutputPath(job, testDir); assertTrue(job.waitForCompletion(false)); - FileStatus [] files = fs.listStatus(testDir); + FileStatus[] files = fs.listStatus(testDir); assertTrue(files.length > 0); - //check output file num and size. + // check output file num and size. 
for (byte[] family : FAMILIES) { - long kvCount= 0; + long kvCount = 0; RemoteIterator iterator = - fs.listFiles(testDir.suffix("/" + new String(family)), true); + fs.listFiles(testDir.suffix("/" + new String(family)), true); while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = - HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); + HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); HFileScanner scanner = reader.getScanner(conf, false, false, false); kvCount += reader.getEntries(); scanner.seekTo(); long perKVSize = scanner.getCell().getSerializedSize(); assertTrue("Data size of each file should not be too large.", - perKVSize * reader.getEntries() <= hregionMaxFilesize); + perKVSize * reader.getEntries() <= hregionMaxFilesize); } assertEquals("Should write expected data in output file.", ROWSPERSPLIT, kvCount); } } /** - * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into - * hfile. + * Test that {@link HFileOutputFormat2} RecordWriter writes tags such as ttl into hfile. */ @Test - public void test_WritingTagData() - throws Exception { + public void test_WritingTagData() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); final String HFILE_FORMAT_VERSION_CONF_KEY = "hfile.format.version"; conf.setInt(HFILE_FORMAT_VERSION_CONF_KEY, HFile.MIN_FORMAT_VERSION_WITH_TAGS); RecordWriter writer = null; TaskAttemptContext context = null; - Path dir = - util.getDataTestDir("WritingTagData"); + Path dir = util.getDataTestDir("WritingTagData"); try { conf.set(HFileOutputFormat2.OUTPUT_TABLE_NAME_CONF_KEY, TABLE_NAMES[0].getNameAsString()); // turn locality off to eliminate getRegionLocation fail-and-retry time when writing kvs @@ -509,9 +493,9 @@ public void test_WritingTagData() context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); - final byte [] b = Bytes.toBytes("b"); + final byte[] b = Bytes.toBytes("b"); - List< Tag > tags = new ArrayList<>(); + List tags = new ArrayList<>(); tags.add(new ArrayBackedTag(TagType.TTL_TAG_TYPE, Bytes.toBytes(978670))); KeyValue kv = new KeyValue(b, b, b, HConstants.LATEST_TIMESTAMP, b, tags); writer.write(new ImmutableBytesWritable(), kv); @@ -519,7 +503,7 @@ public void test_WritingTagData() writer = null; FileSystem fs = dir.getFileSystem(conf); RemoteIterator iterator = fs.listFiles(dir, true); - while(iterator.hasNext()) { + while (iterator.hasNext()) { LocatedFileStatus keyFileStatus = iterator.next(); HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); @@ -538,11 +522,12 @@ public void test_WritingTagData() } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); - conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") - .toString()); + conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, + util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); Table table = Mockito.mock(Table.class); @@ -553,14 +538,14 @@ public void testJobConfiguration() throws Exception { assertEquals(job.getNumReduceTasks(), 4); } - private byte [][] generateRandomStartKeys(int numKeys) { + private byte[][] generateRandomStartKeys(int numKeys) { Random random = ThreadLocalRandom.current(); byte[][] ret = new byte[numKeys][]; // first region start key is always empty ret[0] = HConstants.EMPTY_BYTE_ARRAY; for (int i = 1; i < numKeys; i++) { ret[i] = - PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); + PerformanceEvaluation.generateData(random, PerformanceEvaluation.DEFAULT_VALUE_LENGTH); } return ret; } @@ -575,34 +560,37 @@ private byte[][] generateRandomSplitKeys(int numKeys) { return ret; } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoad() throws Exception { LOG.info("\nStarting test testMRIncrementalLoad\n"); doIncrementalLoadTest(false, false, false, "testMRIncrementalLoad"); } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testMRIncrementalLoadWithSplit() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithSplit\n"); doIncrementalLoadTest(true, false, false, "testMRIncrementalLoadWithSplit"); } /** - * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true - * This test could only check the correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY - * is set to true. Because MiniHBaseCluster always run with single hostname (and different ports), - * it's not possible to check the region locality by comparing region locations and DN hostnames. - * When MiniHBaseCluster supports explicit hostnames parameter (just like MiniDFSCluster does), - * we could test region locality features more easily. + * Test for HFileOutputFormat2.LOCALITY_SENSITIVE_CONF_KEY = true This test could only check the + * correctness of original logic if LOCALITY_SENSITIVE_CONF_KEY is set to true. Because + * MiniHBaseCluster always run with single hostname (and different ports), it's not possible to + * check the region locality by comparing region locations and DN hostnames. When MiniHBaseCluster + * supports explicit hostnames parameter (just like MiniDFSCluster does), we could test region + * locality features more easily. */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testMRIncrementalLoadWithLocality() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithLocality\n"); doIncrementalLoadTest(false, true, false, "testMRIncrementalLoadWithLocality1"); doIncrementalLoadTest(true, true, false, "testMRIncrementalLoadWithLocality2"); } - //@Ignore("Wahtevs") + // @Ignore("Wahtevs") @Test public void testMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMRIncrementalLoadWithPutSortReducer\n"); @@ -610,17 +598,16 @@ public void testMRIncrementalLoadWithPutSortReducer() throws Exception { } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, - boolean putSortReducer, String tableStr) throws Exception { - doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, - Arrays.asList(tableStr)); + boolean putSortReducer, String tableStr) throws Exception { + doIncrementalLoadTest(shouldChangeRegions, shouldKeepLocality, putSortReducer, + Arrays.asList(tableStr)); } @Test public void testMultiMRIncrementalLoadWithPutSortReducer() throws Exception { LOG.info("\nStarting test testMultiMRIncrementalLoadWithPutSortReducer\n"); doIncrementalLoadTest(false, false, true, - Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList - ())); + Arrays.stream(TABLE_NAMES).map(TableName::getNameAsString).collect(Collectors.toList())); } private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKeepLocality, @@ -641,8 +628,8 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); + StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(hostCount) + .dataNodeHosts(hostnames).build(); util.startMiniCluster(option); Map allTables = new HashMap<>(tableStr.size()); @@ -677,8 +664,7 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe if (allTables.containsKey(tf.getPath().getName())) { ++numTableDirs; tablePath = tf.getPath(); - } - else { + } else { continue; } } @@ -712,9 +698,8 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe byte[][] newSplitKeys = generateRandomSplitKeys(14); Table table = util.createTable(chosenTable.getName(), FAMILIES, newSplitKeys); - while (util.getConnection().getRegionLocator(chosenTable.getName()) - .getAllRegionLocations().size() != 15 || - !admin.isTableAvailable(table.getName())) { + while (util.getConnection().getRegionLocator(chosenTable.getName()).getAllRegionLocations() + .size() != 15 || !admin.isTableAvailable(table.getName())) { Thread.sleep(200); LOG.info("Waiting for new region assignment to happen"); } @@ -730,19 +715,19 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } Table currentTable = allTables.get(tableNameStr); TableName currentTableName = currentTable.getName(); - new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, singleTableInfo - .getRegionLocator()); + new LoadIncrementalHFiles(conf).doBulkLoad(tableDir, admin, currentTable, + singleTableInfo.getRegionLocator()); // Ensure data shows up int expectedRows = 0; if (putSortReducer) { // no rows should be extracted assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - 
util.countRows(currentTable)); + util.countRows(currentTable)); } else { expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, - util.countRows(currentTable)); + util.countRows(currentTable)); Scan scan = new Scan(); ResultScanner results = currentTable.getScanner(scan); for (Result res : results) { @@ -775,14 +760,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } admin.enableTable(currentTableName); util.waitTableAvailable(currentTableName); - assertEquals("Data should remain after reopening of regions", - tableDigestBefore, util.checksumRows(currentTable)); + assertEquals("Data should remain after reopening of regions", tableDigestBefore, + util.checksumRows(currentTable)); } } finally { for (HFileOutputFormat2.TableInfo tableInfoSingle : tableInfo) { - tableInfoSingle.getRegionLocator().close(); + tableInfoSingle.getRegionLocator().close(); } - for (Entry singleTable : allTables.entrySet() ) { + for (Entry singleTable : allTables.entrySet()) { singleTable.getValue().close(); util.deleteTable(singleTable.getValue().getName()); } @@ -791,14 +776,14 @@ private void doIncrementalLoadTest(boolean shouldChangeRegions, boolean shouldKe } } - private void runIncrementalPELoad(Configuration conf, List tableInfo, Path outDir, - boolean putSortReducer) throws IOException, - InterruptedException, ClassNotFoundException { + private void runIncrementalPELoad(Configuration conf, + List tableInfo, Path outDir, boolean putSortReducer) + throws IOException, InterruptedException, ClassNotFoundException { Job job = new Job(conf, "testLocalMRIncrementalLoad"); job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad")); job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"), - MutationSerialization.class.getName(), ResultSerialization.class.getName(), - KeyValueSerialization.class.getName()); + MutationSerialization.class.getName(), ResultSerialization.class.getName(), + KeyValueSerialization.class.getName()); setupRandomGeneratorMapper(job, putSortReducer); if (tableInfo.size() > 1) { MultiTableHFileOutputFormat.configureIncrementalLoad(job, tableInfo); @@ -807,29 +792,27 @@ private void runIncrementalPELoad(Configuration conf, List retrievedFamilyToCompressionMap = HFileOutputFormat2 - .createFamilyCompressionMap(conf); + Map retrievedFamilyToCompressionMap = + HFileOutputFormat2.createFamilyCompressionMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToCompression.entrySet()) { - assertEquals("Compression configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("Compression configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToCompressionMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -860,21 +841,17 @@ private void setupMockColumnFamiliesForCompression(Table table, Map familyToCompression) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToCompression.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setCompressionType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new 
HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setCompressionType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForCompression (int numCfs) { + private Map getMockColumnFamiliesForCompression(int numCfs) { Map familyToCompression = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { @@ -892,39 +869,33 @@ private void setupMockColumnFamiliesForCompression(Table table, return familyToCompression; } - /** - * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. - * Tests that the family bloom type map is correctly serialized into - * and deserialized from configuration - * + * Test for {@link HFileOutputFormat2#createFamilyBloomTypeMap(Configuration)}. Tests that the + * family bloom type map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException { for (int numCfs = 0; numCfs <= 2; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBloomType = - getMockColumnFamiliesForBloomType(numCfs); + Map familyToBloomType = getMockColumnFamiliesForBloomType(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBloomType(table, - familyToBloomType); + setupMockColumnFamiliesForBloomType(table, familyToBloomType); conf.set(HFileOutputFormat2.BLOOM_TYPE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, - Arrays.asList(table.getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.bloomTypeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBloomTypeMap = - HFileOutputFormat2 - .createFamilyBloomTypeMap(conf); + HFileOutputFormat2.createFamilyBloomTypeMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToBloomType.entrySet()) { - assertEquals("BloomType configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals("BloomType configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBloomTypeMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -933,29 +904,24 @@ private void setupMockColumnFamiliesForBloomType(Table table, Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBloomFilterType(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new 
HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBloomFilterType(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBloomType (int numCfs) { + private Map getMockColumnFamiliesForBloomType(int numCfs) { Map familyToBloomType = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBloomType.put("Family1!@#!@#&", BloomType.ROW); } if (numCfs-- > 0) { - familyToBloomType.put("Family2=asdads&!AASD", - BloomType.ROWCOL); + familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL); } if (numCfs-- > 0) { familyToBloomType.put("Family3", BloomType.NONE); @@ -964,39 +930,32 @@ private void setupMockColumnFamiliesForBloomType(Table table, } /** - * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. - * Tests that the family block size map is correctly serialized into - * and deserialized from configuration - * + * Test for {@link HFileOutputFormat2#createFamilyBlockSizeMap(Configuration)}. Tests that the + * family block size map is correctly serialized into and deserialized from configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") + @Test public void testSerializeDeserializeFamilyBlockSizeMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); - Map familyToBlockSize = - getMockColumnFamiliesForBlockSize(numCfs); + Map familyToBlockSize = getMockColumnFamiliesForBlockSize(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForBlockSize(table, - familyToBlockSize); + setupMockColumnFamiliesForBlockSize(table, familyToBlockSize); conf.set(HFileOutputFormat2.BLOCK_SIZE_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.blockSizeDetails, Arrays.asList(table - .getTableDescriptor()))); + HFileOutputFormat2.serializeColumnFamilyAttribute(HFileOutputFormat2.blockSizeDetails, + Arrays.asList(table.getTableDescriptor()))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToBlockSizeMap = - HFileOutputFormat2 - .createFamilyBlockSizeMap(conf); + HFileOutputFormat2.createFamilyBlockSizeMap(conf); // test that we have a value for all column families that matches with the // used mock values - for (Entry entry : familyToBlockSize.entrySet() - ) { - assertEquals("BlockSize configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); + for (Entry entry : familyToBlockSize.entrySet()) { + assertEquals("BlockSize configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), retrievedFamilyToBlockSizeMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -1005,33 +964,27 @@ private void setupMockColumnFamiliesForBlockSize(Table table, Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new 
HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setBlocksize(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setBlocksize(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. Column family names have special characters */ - private Map - getMockColumnFamiliesForBlockSize (int numCfs) { + private Map getMockColumnFamiliesForBlockSize(int numCfs) { Map familyToBlockSize = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToBlockSize.put("Family1!@#!@#&", 1234); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { - familyToBlockSize.put("Family2=asdads&!AASD", - Integer.MAX_VALUE); + familyToBlockSize.put("Family2=asdads&!AASD", Integer.MAX_VALUE); } if (numCfs-- > 0) { familyToBlockSize.put("Family3", 0); @@ -1040,39 +993,37 @@ private void setupMockColumnFamiliesForBlockSize(Table table, } /** - * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. - * Tests that the family data block encoding map is correctly serialized into - * and deserialized from configuration - * + * Test for {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}. Tests that + * the family data block encoding map is correctly serialized into and deserialized from + * configuration * @throws IOException */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException { for (int numCfs = 0; numCfs <= 3; numCfs++) { Configuration conf = new Configuration(this.util.getConfiguration()); Map familyToDataBlockEncoding = getMockColumnFamiliesForDataBlockEncoding(numCfs); Table table = Mockito.mock(Table.class); - setupMockColumnFamiliesForDataBlockEncoding(table, - familyToDataBlockEncoding); + setupMockColumnFamiliesForDataBlockEncoding(table, familyToDataBlockEncoding); HTableDescriptor tableDescriptor = table.getTableDescriptor(); conf.set(HFileOutputFormat2.DATABLOCK_ENCODING_FAMILIES_CONF_KEY, - HFileOutputFormat2.serializeColumnFamilyAttribute - (HFileOutputFormat2.dataBlockEncodingDetails, Arrays - .asList(tableDescriptor))); + HFileOutputFormat2.serializeColumnFamilyAttribute( + HFileOutputFormat2.dataBlockEncodingDetails, Arrays.asList(tableDescriptor))); // read back family specific data block encoding settings from the // configuration Map retrievedFamilyToDataBlockEncodingMap = - HFileOutputFormat2 - .createFamilyDataBlockEncodingMap(conf); + HFileOutputFormat2.createFamilyDataBlockEncodingMap(conf); // test that we have a value for all column families that matches with the // used mock values for (Entry entry : familyToDataBlockEncoding.entrySet()) { - assertEquals("DataBlockEncoding configuration incorrect for column family:" - + entry.getKey(), entry.getValue(), - retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); + assertEquals( + "DataBlockEncoding configuration incorrect for column family:" + entry.getKey(), + entry.getValue(), + retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes("UTF-8"))); } } } @@ -1081,33 +1032,27 @@ private void setupMockColumnFamiliesForDataBlockEncoding(Table table, Map familyToDataBlockEncoding) throws IOException { HTableDescriptor mockTableDescriptor = new HTableDescriptor(TABLE_NAMES[0]); for (Entry entry : familyToDataBlockEncoding.entrySet()) { - mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()) - .setMaxVersions(1) - .setDataBlockEncoding(entry.getValue()) - .setBlockCacheEnabled(false) - .setTimeToLive(0)); + mockTableDescriptor.addFamily(new HColumnDescriptor(entry.getKey()).setMaxVersions(1) + .setDataBlockEncoding(entry.getValue()).setBlockCacheEnabled(false).setTimeToLive(0)); } Mockito.doReturn(mockTableDescriptor).when(table).getTableDescriptor(); } /** - * @return a map from column family names to compression algorithms for - * testing column family compression. Column family names have special characters + * @return a map from column family names to compression algorithms for testing column family + * compression. 
Column family names have special characters */ - private Map - getMockColumnFamiliesForDataBlockEncoding (int numCfs) { + private Map getMockColumnFamiliesForDataBlockEncoding(int numCfs) { Map familyToDataBlockEncoding = new HashMap<>(); // use column family names having special characters if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family1!@#!@#&", DataBlockEncoding.DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.FAST_DIFF); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.FAST_DIFF); } if (numCfs-- > 0) { - familyToDataBlockEncoding.put("Family2=asdads&!AASD", - DataBlockEncoding.PREFIX); + familyToDataBlockEncoding.put("Family2=asdads&!AASD", DataBlockEncoding.PREFIX); } if (numCfs-- > 0) { familyToDataBlockEncoding.put("Family3", DataBlockEncoding.NONE); @@ -1116,12 +1061,8 @@ private void setupMockColumnFamiliesForDataBlockEncoding(Table table, } private void setupMockStartKeys(RegionLocator table) throws IOException { - byte[][] mockKeys = new byte[][] { - HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaa"), - Bytes.toBytes("ggg"), - Bytes.toBytes("zzz") - }; + byte[][] mockKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaa"), + Bytes.toBytes("ggg"), Bytes.toBytes("zzz") }; Mockito.doReturn(mockKeys).when(table).getStartKeys(); } @@ -1131,10 +1072,11 @@ private void setupMockTableName(RegionLocator table) throws IOException { } /** - * Test that {@link HFileOutputFormat2} RecordWriter uses compression and - * bloom filter settings from the column family descriptor + * Test that {@link HFileOutputFormat2} RecordWriter uses compression and bloom filter settings + * from the column family descriptor */ - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testColumnFamilySettings() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter writer = null; @@ -1146,7 +1088,7 @@ public void testColumnFamilySettings() throws Exception { RegionLocator regionLocator = Mockito.mock(RegionLocator.class); HTableDescriptor htd = new HTableDescriptor(TABLE_NAMES[0]); Mockito.doReturn(htd).when(table).getTableDescriptor(); - for (HColumnDescriptor hcd: HBaseTestingUtility.generateColumnDescriptors()) { + for (HColumnDescriptor hcd : HBaseTestingUtility.generateColumnDescriptors()) { htd.addFamily(hcd); } @@ -1194,8 +1136,8 @@ public void testColumnFamilySettings() throws Exception { byte[] bloomFilter = fileInfo.get(BLOOM_FILTER_TYPE_KEY); if (bloomFilter == null) bloomFilter = Bytes.toBytes("NONE"); - assertEquals("Incorrect bloom filter used for column family " + familyStr + - "(reader: " + reader + ")", + assertEquals( + "Incorrect bloom filter used for column family " + familyStr + "(reader: " + reader + ")", hcd.getBloomFilterType(), BloomType.valueOf(Bytes.toString(bloomFilter))); assertEquals( "Incorrect compression used for column family " + familyStr + "(reader: " + reader + ")", @@ -1207,8 +1149,8 @@ public void testColumnFamilySettings() throws Exception { } /** - * Write random values to the writer assuming a table created using - * {@link #FAMILIES} as column family descriptors + * Write random values to the writer assuming a table created using {@link #FAMILIES} as column + * family descriptors */ private void writeRandomKeyValues(RecordWriter writer, TaskAttemptContext context, Set families, int numRows) @@ -1219,7 +1161,7 @@ private void writeRandomKeyValues(RecordWriter wri int taskId = context.getTaskAttemptID().getTaskID().getId(); assert taskId < Byte.MAX_VALUE : "Unit tests dont support > 127 tasks!"; - final byte [] qualifier = Bytes.toBytes("data"); + final byte[] qualifier = Bytes.toBytes("data"); for (int i = 0; i < numRows; i++) { Bytes.putInt(keyBytes, 0, i); Bytes.random(valBytes); @@ -1232,12 +1174,12 @@ private void writeRandomKeyValues(RecordWriter wri } /** - * This test is to test the scenario happened in HBASE-6901. - * All files are bulk loaded and excluded from minor compaction. - * Without the fix of HBASE-6901, an ArrayIndexOutOfBoundsException - * will be thrown. + * This test is to test the scenario happened in HBASE-6901. All files are bulk loaded and + * excluded from minor compaction. Without the fix of HBASE-6901, an + * ArrayIndexOutOfBoundsException will be thrown. 
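 * <p>
 * For reference, the exclusion is driven by one flag which the test body below sets before
 * writing the bulk-load files (sketch only, using the property name from the test):
 * <pre>{@code
 * Configuration conf = util.getConfiguration();
 * // HFileOutputFormat2 then tags every written HFile so minor compactions skip it
 * conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true);
 * }</pre>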
*/ - @Ignore ("Flakey: See HBASE-9051") @Test + @Ignore("Flakey: See HBASE-9051") + @Test public void testExcludeAllFromMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1252,28 +1194,29 @@ public void testExcludeAllFromMinorCompaction() throws Exception { assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), - new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), - Bytes.toString(FAMILIES[0]))); + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), + Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // Generate two bulk load files - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); for (int i = 0; i < 2; i++) { Path testDir = util.getDataTestDirOnTestFS("testExcludeAllFromMinorCompaction_" + i); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), conn.getRegionLocator(TABLE_NAMES[0]))), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), + conn.getRegionLocator(TABLE_NAMES[0]))), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, locator); } // Ensure data shows up int expectedRows = 2 * NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1318,7 +1261,8 @@ public Boolean call() throws Exception { } } - @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test + @Ignore("Goes zombie too frequently; needs work. 
See HBASE-14563") + @Test public void testExcludeMinorCompaction() throws Exception { Configuration conf = util.getConfiguration(); conf.setInt("hbase.hstore.compaction.min", 2); @@ -1326,17 +1270,17 @@ public void testExcludeMinorCompaction() throws Exception { util.startMiniCluster(); try (Connection conn = ConnectionFactory.createConnection(conf); - Admin admin = conn.getAdmin()){ + Admin admin = conn.getAdmin()) { Path testDir = util.getDataTestDirOnTestFS("testExcludeMinorCompaction"); final FileSystem fs = util.getDFSCluster().getFileSystem(); Table table = util.createTable(TABLE_NAMES[0], FAMILIES); assertEquals("Should start with empty table", 0, util.countRows(table)); // deep inspection: get the StoreFile dir - final Path storePath = new Path( - CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), - new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), - Bytes.toString(FAMILIES[0]))); + final Path storePath = + new Path(CommonFSUtils.getTableDir(CommonFSUtils.getRootDir(conf), TABLE_NAMES[0]), + new Path(admin.getTableRegions(TABLE_NAMES[0]).get(0).getEncodedName(), + Bytes.toString(FAMILIES[0]))); assertEquals(0, fs.listStatus(storePath).length); // put some data in it and flush to create a storefile @@ -1353,20 +1297,20 @@ public Boolean call() throws Exception { }, 5000); // Generate a bulk load file with more rows - conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", - true); + conf.setBoolean("hbase.mapreduce.hfileoutputformat.compaction.exclude", true); RegionLocator regionLocator = conn.getRegionLocator(TABLE_NAMES[0]); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(table - .getTableDescriptor(), regionLocator)), testDir, false); + runIncrementalPELoad(conf, + Arrays.asList(new HFileOutputFormat2.TableInfo(table.getTableDescriptor(), regionLocator)), + testDir, false); // Perform the actual load new LoadIncrementalHFiles(conf).doBulkLoad(testDir, admin, table, regionLocator); // Ensure data shows up int expectedRows = NMapInputFormat.getNumMapTasks(conf) * ROWSPERSPLIT; - assertEquals("LoadIncrementalHFiles should put expected data in table", - expectedRows + 1, util.countRows(table)); + assertEquals("LoadIncrementalHFiles should put expected data in table", expectedRows + 1, + util.countRows(table)); // should have a second StoreFile now assertEquals(2, fs.listStatus(storePath).length); @@ -1424,16 +1368,17 @@ public void manualTest(String args[]) throws Exception { Table table = util.createTable(tname, FAMILIES, splitKeys); } else if ("incremental".equals(args[0])) { TableName tname = TableName.valueOf(args[1]); - try(Connection c = ConnectionFactory.createConnection(conf); + try (Connection c = ConnectionFactory.createConnection(conf); Admin admin = c.getAdmin(); RegionLocator regionLocator = c.getRegionLocator(tname)) { Path outDir = new Path("incremental-out"); - runIncrementalPELoad(conf, Arrays.asList(new HFileOutputFormat2.TableInfo(admin - .getTableDescriptor(tname), regionLocator)), outDir, false); + runIncrementalPELoad(conf, + Arrays.asList( + new HFileOutputFormat2.TableInfo(admin.getTableDescriptor(tname), regionLocator)), + outDir, false); } } else { - throw new RuntimeException( - "usage: TestHFileOutputFormat2 newtable | incremental"); + throw new RuntimeException("usage: TestHFileOutputFormat2 newtable | incremental"); } } @@ -1443,9 +1388,10 @@ public void testBlockStoragePolicy() throws Exception { Configuration conf = util.getConfiguration(); 
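    // For reference: STORAGE_POLICY_PROPERTY is the default HDFS storage policy that
    // HFileOutputFormat2 applies to its family output directories, while
    // STORAGE_POLICY_PROPERTY_CF_PREFIX followed by the combined table/family name (built with
    // combineTableNameSuffix below) overrides it for a single column family. Here the default
    // becomes ALL_SSD and FAMILIES[0] is overridden to ONE_SSD; the assertions later read the
    // resulting directory policies back through getStoragePolicyName.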
conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY, "ALL_SSD"); - conf.set(HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + - Bytes.toString(HFileOutputFormat2.combineTableNameSuffix( - TABLE_NAMES[0].getName(), FAMILIES[0])), "ONE_SSD"); + conf.set( + HFileOutputFormat2.STORAGE_POLICY_PROPERTY_CF_PREFIX + Bytes.toString( + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0])), + "ONE_SSD"); Path cf1Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[0])); Path cf2Dir = new Path(util.getDataTestDir(), Bytes.toString(FAMILIES[1])); util.startMiniDFSCluster(3); @@ -1464,9 +1410,9 @@ public void testBlockStoragePolicy() throws Exception { // alter table cf schema to change storage policies HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[0]), cf1Dir); HFileOutputFormat2.configureStoragePolicy(conf, fs, - HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); + HFileOutputFormat2.combineTableNameSuffix(TABLE_NAMES[0].getName(), FAMILIES[1]), cf2Dir); spA = getStoragePolicyName(fs, cf1Dir); spB = getStoragePolicyName(fs, cf2Dir); LOG.debug("Storage policy of cf 0: [" + spA + "]."); @@ -1553,7 +1499,7 @@ public void TestConfigureCompression() throws Exception { HFile.Reader reader = HFile.createReader(fs, keyFileStatus.getPath(), new CacheConfig(conf), true, conf); assertEquals(reader.getTrailer().getCompressionCodec().getName(), - hfileoutputformatCompression); + hfileoutputformatCompression); } } finally { if (writer != null && context != null) { @@ -1575,8 +1521,8 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { for (int i = 0; i < hostCount; ++i) { hostnames[i] = "datanode_" + i; } - StartMiniClusterOption option = StartMiniClusterOption.builder() - .numRegionServers(hostCount).dataNodeHosts(hostnames).build(); + StartMiniClusterOption option = StartMiniClusterOption.builder().numRegionServers(hostCount) + .dataNodeHosts(hostnames).build(); util.startMiniCluster(option); // Start cluster B @@ -1590,7 +1536,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { TableName tableName = TableName.valueOf("table"); // Create table in cluster B try (Table table = utilB.createTable(tableName, FAMILIES, splitKeys); - RegionLocator r = utilB.getConnection().getRegionLocator(tableName)) { + RegionLocator r = utilB.getConnection().getRegionLocator(tableName)) { // Generate the bulk load files // Job has zookeeper configuration for cluster A // Assume reading from cluster A by TableInputFormat and creating hfiles to cluster B @@ -1620,7 +1566,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { assertTrue(job.waitForCompletion(true)); final List configs = - ConfigurationCaptorConnection.getCapturedConfigarutions(key); + ConfigurationCaptorConnection.getCapturedConfigarutions(key); assertFalse(configs.isEmpty()); for (Configuration config : configs) { @@ -1631,8 +1577,7 @@ public void testMRIncrementalLoadWithLocalityMultiCluster() throws Exception { assertEquals(confB.get(HConstants.ZOOKEEPER_ZNODE_PARENT), config.get(HConstants.ZOOKEEPER_ZNODE_PARENT)); - assertEquals(bSpecificConfigValue, - config.get(bSpecificConfigKey)); + assertEquals(bSpecificConfigValue, config.get(bSpecificConfigKey)); } } finally { utilB.deleteTable(tableName); @@ -1650,7 +1595,7 @@ 
private static class ConfigurationCaptorConnection implements Connection { private final Connection delegate; public ConfigurationCaptorConnection(Configuration conf, ExecutorService es, User user) - throws IOException { + throws IOException { Configuration confForDelegate = new Configuration(conf); confForDelegate.unset(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL); delegate = createConnection(confForDelegate, es, user); @@ -1730,8 +1675,7 @@ public TableBuilder getTableBuilder(TableName tableName, ExecutorService pool) { } @Override - public Hbck getHbck() - throws IOException { + public Hbck getHbck() throws IOException { return delegate.getHbck(); } @@ -1752,4 +1696,3 @@ public boolean isAborted() { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java index 77245f3c360d..a28a47eb89cb 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHRegionPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestHRegionPartitioner { @ClassRule @@ -65,8 +65,8 @@ public void testHRegionPartitioner() throws Exception { byte[][] families = { Bytes.toBytes("familyA"), Bytes.toBytes("familyB") }; - UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, - Bytes.toBytes("aa"), Bytes.toBytes("cc"), 3); + UTIL.createTable(TableName.valueOf(name.getMethodName()), families, 1, Bytes.toBytes("aa"), + Bytes.toBytes("cc"), 3); HRegionPartitioner partitioner = new HRegionPartitioner<>(); Configuration configuration = UTIL.getConfiguration(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java index ffcc900acc7f..b2cffca271a1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestHashTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -85,9 +85,9 @@ public void testHashTable() throws Exception { int numRegions = 10; int numHashFiles = 3; - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } long timestamp = 1430764183454L; @@ -107,13 +107,9 @@ public void testHashTable() throws Exception { Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName.getNameAsString()); long batchSize = 300; - int code = hashTable.run(new String[] { - "--batchsize=" + batchSize, - "--numhashfiles=" + numHashFiles, - "--scanbatch=2", - tableName.getNameAsString(), - testDir.toString() - }); + int code = + hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, + "--scanbatch=2", tableName.getNameAsString(), testDir.toString() }); assertEquals("test job failed", 0, code); FileSystem fs = TEST_UTIL.getTestFileSystem(); @@ -127,29 +123,29 @@ public void testHashTable() throws Exception { LOG.debug("partition: " + Bytes.toInt(bytes.get())); } - ImmutableMap expectedHashes - = ImmutableMap.builder() - .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) - .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) - .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) - .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) - .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) - .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) - .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) - .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) - .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) - .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) - .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) - .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) - .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) - .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) - .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) - .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) - .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) - .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) - .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) - .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) - .build(); + ImmutableMap expectedHashes = + ImmutableMap. 
builder() + .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) + .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) + .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) + .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) + .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) + .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) + .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) + .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) + .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) + .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) + .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) + .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) + .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) + .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) + .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) + .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) + .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) + .put(85, new ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) + .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) + .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) + .build(); Map actualHashes = new HashMap<>(); Path dataDir = new Path(testDir, HashTable.HASH_DATA_DIR); @@ -166,7 +162,7 @@ public void testHashTable() throws Exception { int intKey = -1; if (key.getLength() > 0) { - intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); + intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); } if (actualHashes.containsKey(intKey)) { Assert.fail("duplicate key in data files: " + intKey); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java index 4e56e4face15..c474d92a6bae 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportExport.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -109,8 +109,8 @@ /** * Tests the table import and table export MR job functionality */ -@Category({VerySlowMapReduceTests.class, MediumTests.class}) -//TODO : Remove this in 3.0 +@Category({ VerySlowMapReduceTests.class, MediumTests.class }) +// TODO : Remove this in 3.0 public class TestImportExport { @ClassRule @@ -134,7 +134,7 @@ public class TestImportExport { private static final long now = EnvironmentEdgeManager.currentTime(); private final TableName EXPORT_TABLE = TableName.valueOf("export_table"); private final TableName IMPORT_TABLE = TableName.valueOf("import_table"); - public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); + public static final byte TEST_TAG_TYPE = (byte) (Tag.CUSTOM_TAG_TYPE_RANGE + 1); public static final String TEST_ATTR = "source_op"; public static final String TEST_TAG = "test_tag"; @@ -144,7 +144,7 @@ public static void beforeClass() throws Throwable { UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); UTIL.startMiniCluster(); FQ_OUTPUT_DIR = - new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); + new Path(OUTPUT_DIR).makeQualified(FileSystem.get(UTIL.getConfiguration())).toString(); } @AfterClass @@ -228,48 +228,43 @@ public void testSimpleCase() throws Throwable { t.put(p); } - String[] args = new String[] { - // Only export row1 & row2. - "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", - "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export - }; - assertTrue(runExport(args)); + String[] args = new String[] { + // Only export row1 & row2. 
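      // (For reference: these -D properties set hbase.mapreduce.scan.row.start/stop on the job
      // configuration, bounding the export scan to [start, stop); the \x escapes are
      // Bytes.toBytesBinary notation for raw key bytes.)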
+ "-D" + TableInputFormat.SCAN_ROW_START + "=\\x32row1", + "-D" + TableInputFormat.SCAN_ROW_STOP + "=\\x32row3", name.getMethodName(), FQ_OUTPUT_DIR, + "1000", // max number of key versions per key to export + }; + assertTrue(runExport(args)); - final String IMPORT_TABLE = name.getMethodName() + "import"; - try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { - args = new String[] { - "-D" + Import.CF_RENAME_PROP + "="+FAMILYA_STRING+":"+FAMILYB_STRING, - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; - assertTrue(runImport(args)); - - Get g = new Get(ROW1); - g.setMaxVersions(); - Result r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW2); - g.setMaxVersions(); - r = t.get(g); - assertEquals(3, r.size()); - g = new Get(ROW3); - r = t.get(g); - assertEquals(0, r.size()); - } + final String IMPORT_TABLE = name.getMethodName() + "import"; + try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), FAMILYB, 3);) { + args = + new String[] { "-D" + Import.CF_RENAME_PROP + "=" + FAMILYA_STRING + ":" + FAMILYB_STRING, + IMPORT_TABLE, FQ_OUTPUT_DIR }; + assertTrue(runImport(args)); + + Get g = new Get(ROW1); + g.setMaxVersions(); + Result r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW2); + g.setMaxVersions(); + r = t.get(g); + assertEquals(3, r.size()); + g = new Get(ROW3); + r = t.get(g); + assertEquals(0, r.size()); + } } /** * Test export hbase:meta table - * * @throws Throwable */ @Test public void testMetaExport() throws Throwable { - String[] args = new String[] { TableName.META_TABLE_NAME.getNameAsString(), - FQ_OUTPUT_DIR, "1", "0", "0" }; + String[] args = + new String[] { TableName.META_TABLE_NAME.getNameAsString(), FQ_OUTPUT_DIR, "1", "0", "0" }; assertTrue(runExport(args)); } @@ -293,34 +288,26 @@ public void testImport94Table() throws Throwable { fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name)); String IMPORT_TABLE = name; try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) { - String[] args = new String[] { - "-Dhbase.import.version=0.94" , - IMPORT_TABLE, FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-Dhbase.import.version=0.94", IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - /* exportedTableIn94Format contains 5 rows - ROW COLUMN+CELL - r1 column=f1:c1, timestamp=1383766761171, value=val1 - r2 column=f1:c1, timestamp=1383766771642, value=val2 - r3 column=f1:c1, timestamp=1383766777615, value=val3 - r4 column=f1:c1, timestamp=1383766785146, value=val4 - r5 column=f1:c1, timestamp=1383766791506, value=val5 - */ - assertEquals(5, UTIL.countRows(t)); + /* + * exportedTableIn94Format contains 5 rows ROW COLUMN+CELL r1 column=f1:c1, + * timestamp=1383766761171, value=val1 r2 column=f1:c1, timestamp=1383766771642, value=val2 r3 + * column=f1:c1, timestamp=1383766777615, value=val3 r4 column=f1:c1, timestamp=1383766785146, + * value=val4 r5 column=f1:c1, timestamp=1383766791506, value=val5 + */ + assertEquals(5, UTIL.countRows(t)); } } /** * Test export scanner batching */ - @Test - public void testExportScannerBatching() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(1) - .build()) - .build(); + @Test + public void testExportScannerBatching() throws Throwable { + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( + 
ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(1).build()).build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -332,11 +319,11 @@ public void testExportScannerBatching() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - String[] args = new String[] { - "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added scanner batching arg. - name.getMethodName(), - FQ_OUTPUT_DIR - }; + String[] args = new String[] { "-D" + ExportUtils.EXPORT_BATCHING + "=" + EXPORT_BATCH_SIZE, // added + // scanner + // batching + // arg. + name.getMethodName(), FQ_OUTPUT_DIR }; assertTrue(runExport(args)); FileSystem fs = FileSystem.get(UTIL.getConfiguration()); @@ -346,12 +333,10 @@ public void testExportScannerBatching() throws Throwable { @Test public void testWithDeletes() throws Throwable { - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { @@ -364,35 +349,26 @@ public void testWithDeletes() throws Throwable { p.addColumn(FAMILYA, QUAL, now + 4, QUAL); t.put(p); - Delete d = new Delete(ROW1, now+3); + Delete d = new Delete(ROW1, now + 3); t.delete(d); d = new Delete(ROW1); - d.addColumns(FAMILYA, QUAL, now+2); + d.addColumns(FAMILYA, QUAL, now + 2); t.delete(d); } - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - name.getMethodName(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", name.getMethodName(), + FQ_OUTPUT_DIR, "1000", // max number of key versions per key to export }; assertTrue(runExport(args)); final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); try (Table t = UTIL.getConnection().getTable(desc.getTableName());) { - args = new String[] { - IMPORT_TABLE, - FQ_OUTPUT_DIR - }; + args = new String[] { IMPORT_TABLE, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -402,71 +378,60 @@ public void testWithDeletes() throws Throwable { Result r = scanner.next(); Cell[] res = r.rawCells(); assertTrue(PrivateCellUtil.isDeleteFamily(res[0])); - assertEquals(now+4, res[1].getTimestamp()); - assertEquals(now+3, res[2].getTimestamp()); + assertEquals(now + 4, res[1].getTimestamp()); + assertEquals(now + 3, res[2].getTimestamp()); assertTrue(CellUtil.isDelete(res[3])); - assertEquals(now+2, res[4].getTimestamp()); - assertEquals(now+1, res[5].getTimestamp()); + assertEquals(now + 2, 
res[4].getTimestamp()); + assertEquals(now + 1, res[5].getTimestamp()); assertEquals(now, res[6].getTimestamp()); } } - @Test public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) .build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); exportT.delete(d); - //Add second version of QUAL + // Add second version of QUAL p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now + 5, "s".getBytes()); exportT.put(p); - //Add second Delete family marker - d = new Delete(ROW1, now+7); + // Add second Delete family marker + d = new Delete(ROW1, now + 7); exportT.delete(d); - - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key + // to export }; assertTrue(runExport(args)); final String importTable = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(importTable)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(importTable)) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .build(); UTIL.getAdmin().createTable(desc); Table importT = UTIL.getConnection().getTable(TableName.valueOf(importTable)); - args = new String[] { - importTable, - FQ_OUTPUT_DIR - }; + args = new String[] { importTable, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); Scan s = new Scan(); @@ -477,11 +442,11 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro Result importedTResult = importedTScanner.next(); ResultScanner exportedTScanner = exportT.getScanner(s); - Result exportedTResult = exportedTScanner.next(); + Result exportedTResult = exportedTScanner.next(); try { Result.compareResults(exportedTResult, importedTResult); } catch (Throwable e) { - fail("Original and imported tables data comparision failed with error:"+e.getMessage()); + fail("Original and imported tables data comparision failed with error:" + e.getMessage()); } finally { exportT.close(); importT.close(); @@ -489,18 +454,15 @@ public void testWithMultipleDeleteFamilyMarkersOfSameRowSameFamily() throws Thro } /** - * Create a simple table, run an Export Job on it, Import with filtering on, verify counts, + * Create a simple table, run an 
Export Job on it, Import with filtering on, verify counts, * attempt with invalid values. */ @Test public void testWithFilter() throws Throwable { // Create simple table to export - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(name.getMethodName())) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + TableDescriptor desc = + TableDescriptorBuilder.newBuilder(TableName.valueOf(name.getMethodName())).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()).build(); UTIL.getAdmin().createTable(desc); Table exportTable = UTIL.getConnection().getTable(desc.getTableName()); @@ -523,19 +485,14 @@ public void testWithFilter() throws Throwable { // Import to a new table final String IMPORT_TABLE = name.getMethodName() + "import"; - desc = TableDescriptorBuilder - .newBuilder(TableName.valueOf(IMPORT_TABLE)) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .build()) - .build(); + desc = TableDescriptorBuilder.newBuilder(TableName.valueOf(IMPORT_TABLE)).setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5).build()).build(); UTIL.getAdmin().createTable(desc); Table importTable = UTIL.getConnection().getTable(desc.getTableName()); args = new String[] { "-D" + Import.FILTER_CLASS_CONF_KEY + "=" + PrefixFilter.class.getName(), "-D" + Import.FILTER_ARGS_CONF_KEY + "=" + Bytes.toString(ROW1), IMPORT_TABLE, - FQ_OUTPUT_DIR, - "1000" }; + FQ_OUTPUT_DIR, "1000" }; assertTrue(runImport(args)); // get the count of the source table for that time range @@ -583,7 +540,7 @@ private int getCount(Table table, Filter filter) throws IOException { public void testImportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -613,29 +570,19 @@ public void testExportScan() throws Exception { String prefix = "row"; String label_0 = "label_0"; String label_1 = "label_1"; - String[] args = { - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + String[] args = { "table", "outputDir", String.valueOf(version), String.valueOf(startTime), + String.valueOf(endTime), prefix }; Scan scan = ExportUtils.getScanFromCommandLine(UTIL.getConfiguration(), args); assertEquals(version, scan.getMaxVersions()); assertEquals(startTime, scan.getTimeRange().getMin()); assertEquals(endTime, scan.getTimeRange().getMax()); assertEquals(true, (scan.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); - String[] argsWithLabels = { - "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, - "table", - "outputDir", - String.valueOf(version), - String.valueOf(startTime), - String.valueOf(endTime), - prefix - }; + assertEquals(0, + Bytes.compareTo(((PrefixFilter) scan.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + String[] argsWithLabels = + { "-D " + ExportUtils.EXPORT_VISIBILITY_LABELS + "=" + label_0 + "," + label_1, "table", + "outputDir", String.valueOf(version), String.valueOf(startTime), + 
String.valueOf(endTime), prefix }; Configuration conf = new Configuration(UTIL.getConfiguration()); // parse the "-D" options String[] otherArgs = new GenericOptionsParser(conf, argsWithLabels).getRemainingArgs(); @@ -644,7 +591,8 @@ public void testExportScan() throws Exception { assertEquals(startTime, scanWithLabels.getTimeRange().getMin()); assertEquals(endTime, scanWithLabels.getTimeRange().getMax()); assertEquals(true, (scanWithLabels.getFilter() instanceof PrefixFilter)); - assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), Bytes.toBytesBinary(prefix))); + assertEquals(0, Bytes.compareTo(((PrefixFilter) scanWithLabels.getFilter()).getPrefix(), + Bytes.toBytesBinary(prefix))); assertEquals(2, scanWithLabels.getAuthorizations().getLabels().size()); assertEquals(label_0, scanWithLabels.getAuthorizations().getLabels().get(0)); assertEquals(label_1, scanWithLabels.getAuthorizations().getLabels().get(1)); @@ -657,7 +605,7 @@ public void testExportScan() throws Exception { public void testExportMain() throws Throwable { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -670,11 +618,10 @@ public void testExportMain() throws Throwable { assertEquals(-1, newSecurityManager.getExitCode()); String errMsg = data.toString(); assertTrue(errMsg.contains("Wrong number of arguments:")); - assertTrue(errMsg.contains( - "Usage: Export [-D ]* [ " + - "[ []] [^[regex pattern] or [Prefix] to filter]]")); assertTrue( - errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); + errMsg.contains("Usage: Export [-D ]* [ " + + "[ []] [^[regex pattern] or [Prefix] to filter]]")); + assertTrue(errMsg.contains("-D hbase.mapreduce.scan.column.family=,, ...")); assertTrue(errMsg.contains("-D hbase.mapreduce.include.deleted.rows=true")); assertTrue(errMsg.contains("-D hbase.client.scanner.caching=100")); assertTrue(errMsg.contains("-D hbase.export.scanner.batch=10")); @@ -721,8 +668,8 @@ public Void answer(InvocationOnMock invocation) throws Throwable { } /** - * Test addFilterAndArguments method of Import This method set couple - * parameters into Configuration + * Test addFilterAndArguments method of Import This method set couple parameters into + * Configuration */ @Test public void testAddFilterAndArguments() throws IOException { @@ -734,7 +681,7 @@ public void testAddFilterAndArguments() throws IOException { Import.addFilterAndArguments(configuration, FilterBase.class, args); assertEquals("org.apache.hadoop.hbase.filter.FilterBase", - configuration.get(Import.FILTER_CLASS_CONF_KEY)); + configuration.get(Import.FILTER_CLASS_CONF_KEY)); assertEquals("param1,param2", configuration.get(Import.FILTER_ARGS_CONF_KEY)); } @@ -758,7 +705,7 @@ public void testDurability() throws Throwable { exportTable.put(put); // Run the export - String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000"}; + String[] args = new String[] { exportTableName, FQ_OUTPUT_DIR, "1000" }; assertTrue(runExport(args)); // Create the table for import @@ -773,13 +720,12 @@ public void testDurability() throws Throwable { wal.registerWALActionsListener(walListener); // Run the import with SKIP_WAL - args = - new String[] { "-D" + Import.WAL_DURABILITY + "=" + 
Durability.SKIP_WAL.name(), - importTableName, FQ_OUTPUT_DIR }; + args = new String[] { "-D" + Import.WAL_DURABILITY + "=" + Durability.SKIP_WAL.name(), + importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is not visisted + // Assert that the wal is not visisted assertTrue(!walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); // Run the import with the default durability option @@ -792,16 +738,16 @@ public void testDurability() throws Throwable { wal.registerWALActionsListener(walListener); args = new String[] { importTableName, FQ_OUTPUT_DIR }; assertTrue(runImport(args)); - //Assert that the wal is visisted + // Assert that the wal is visisted assertTrue(walListener.isWALVisited()); - //Ensure that the count is 2 (only one version of key value is obtained) + // Ensure that the count is 2 (only one version of key value is obtained) assertTrue(getCount(importTable, null) == 2); } } /** - * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to - * identify that an entry is written to the Write Ahead Log for the given table. + * This listens to the {@link #visitLogEntryBeforeWrite(RegionInfo, WALKey, WALEdit)} to identify + * that an entry is written to the Write Ahead Log for the given table. */ private static class TableWALActionListener implements WALActionsListener { @@ -815,7 +761,7 @@ public TableWALActionListener(RegionInfo region) { @Override public void visitLogEntryBeforeWrite(RegionInfo info, WALKey logKey, WALEdit logEdit) { if (logKey.getTableName().getNameAsString().equalsIgnoreCase( - this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { + this.regionInfo.getTable().getNameAsString()) && (!logEdit.isMetaEdit())) { isVisited = true; } } @@ -826,45 +772,39 @@ public boolean isWALVisited() { } /** - * Add cell tags to delete mutations, run export and import tool and - * verify that tags are present in import table also. + * Add cell tags to delete mutations, run export and import tool and verify that tags are present + * in import table also. * @throws Throwable throws Throwable. */ @Test public void testTagsAddition() throws Throwable { final TableName exportTable = TableName.valueOf(name.getMethodName()); - TableDescriptor desc = TableDescriptorBuilder - .newBuilder(exportTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor desc = TableDescriptorBuilder.newBuilder(exportTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(desc); Table exportT = UTIL.getConnection().getTable(exportTable); - //Add first version of QUAL + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); exportT.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); exportT.delete(d); // Run export tool with KeyValueCodecWithTags as Codec. 
This will ensure that export tool // will use KeyValueCodecWithTags. - String[] args = new String[] { - "-D" + ExportUtils.RAW_SCAN + "=true", - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - exportTable.getNameAsString(), - FQ_OUTPUT_DIR, - "1000", // max number of key versions per key to export + String[] args = new String[] { "-D" + ExportUtils.RAW_SCAN + "=true", + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + exportTable.getNameAsString(), FQ_OUTPUT_DIR, "1000", // max number of key versions per key + // to export }; assertTrue(runExport(args)); // Assert tag exists in exportTable @@ -872,23 +812,17 @@ public void testTagsAddition() throws Throwable { // Create an import table with MetadataController. final TableName importTable = TableName.valueOf("importWithTestTagsAddition"); - TableDescriptor importTableDesc = TableDescriptorBuilder - .newBuilder(importTable) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor importTableDesc = TableDescriptorBuilder.newBuilder(importTable) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(importTableDesc); // Run import tool. args = new String[] { - // This will make sure that codec will encode and decode tags in rpc call. - "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", - importTable.getNameAsString(), - FQ_OUTPUT_DIR - }; + // This will make sure that codec will encode and decode tags in rpc call. + "-Dhbase.client.rpc.codec=org.apache.hadoop.hbase.codec.KeyValueCodecWithTags", + importTable.getNameAsString(), FQ_OUTPUT_DIR }; assertTrue(runImport(args)); // Make sure that tags exists in imported table. checkWhetherTagExists(importTable, true); @@ -911,7 +845,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO } } boolean deleteFound = false; - for (Cell cell: values) { + for (Cell cell : values) { if (PrivateCellUtil.isDelete(cell.getType().getCode())) { deleteFound = true; List tags = PrivateCellUtil.getTags(cell); @@ -931,7 +865,7 @@ private void checkWhetherTagExists(TableName table, boolean tagExists) throws IO } /* - This co-proc will add a cell tag to delete mutation. + * This co-proc will add a cell tag to delete mutation. 
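 * For reference, the mechanism used below: preBatchMutate reads the mutation's TEST_ATTR
 * attribute, wraps the value in an ArrayBackedTag of type TEST_TAG_TYPE, then walks the
 * mutation's cells with a CellScanner and rewrites each cell with the extra tag appended, so the
 * tag is persisted with the delete marker and survives the export/import round trip above.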
*/ public static class MetadataController implements RegionCoprocessor, RegionObserver { @Override @@ -941,8 +875,7 @@ public Optional getRegionObserver() { @Override public void preBatchMutate(ObserverContext c, - MiniBatchOperationInProgress miniBatchOp) - throws IOException { + MiniBatchOperationInProgress miniBatchOp) throws IOException { if (c.getEnvironment().getRegion().getRegionInfo().getTable().isSystemTable()) { return; } @@ -957,7 +890,7 @@ public void preBatchMutate(ObserverContext c, } Tag sourceOpTag = new ArrayBackedTag(TEST_TAG_TYPE, sourceOpAttr); List updatedCells = new ArrayList<>(); - for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance(); ) { + for (CellScanner cellScanner = m.cellScanner(); cellScanner.advance();) { Cell cell = cellScanner.current(); List tags = PrivateCellUtil.getTags(cell); tags.add(sourceOpTag); @@ -975,34 +908,30 @@ public void preBatchMutate(ObserverContext c, } /** - * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string - * This means it will use no Codec. Make sure that we don't return Tags in response. + * Set hbase.client.rpc.codec and hbase.client.default.rpc.codec both to empty string This means + * it will use no Codec. Make sure that we don't return Tags in response. * @throws Exception Exception */ @Test public void testTagsWithEmptyCodec() throws Exception { TableName tableName = TableName.valueOf(name.getMethodName()); - TableDescriptor tableDesc = TableDescriptorBuilder - .newBuilder(tableName) - .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA) - .setMaxVersions(5) - .setKeepDeletedCells(KeepDeletedCells.TRUE) - .build()) - .setCoprocessor(MetadataController.class.getName()) - .build(); + TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(FAMILYA).setMaxVersions(5) + .setKeepDeletedCells(KeepDeletedCells.TRUE).build()) + .setCoprocessor(MetadataController.class.getName()).build(); UTIL.getAdmin().createTable(tableDesc); Configuration conf = new Configuration(UTIL.getConfiguration()); conf.set(RPC_CODEC_CONF_KEY, ""); conf.set(DEFAULT_CODEC_CLASS, ""); try (Connection connection = ConnectionFactory.createConnection(conf); - Table table = connection.getTable(tableName)) { - //Add first version of QUAL + Table table = connection.getTable(tableName)) { + // Add first version of QUAL Put p = new Put(ROW1); p.addColumn(FAMILYA, QUAL, now, QUAL); table.put(p); - //Add Delete family marker - Delete d = new Delete(ROW1, now+3); + // Add Delete family marker + Delete d = new Delete(ROW1, now + 3); // Add test attribute to delete mutation. d.setAttribute(TEST_ATTR, Bytes.toBytes(TEST_TAG)); table.delete(d); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java index 83a457ff0710..6fff39372b6e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithOperationAttributes.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -65,7 +65,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithOperationAttributes implements Configurable { @ClassRule @@ -78,8 +78,7 @@ public class TestImportTSVWithOperationAttributes implements Configurable { protected static HBaseTestingUtility util = new HBaseTestingUtility(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -153,13 +152,10 @@ public void testMROnTableWithInvalidOperationAttr() throws Exception { } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. + * @param args Any arguments to pass BEFORE inputFile path is appended. * @param dataAvailable * @return The Tool instance used to run the test. */ @@ -199,7 +195,6 @@ private Tool doMROnTableTest(HBaseTestingUtility util, String family, String dat /** * Confirm ImportTsv via data in online table. - * * @param dataAvailable */ private static void validateTable(Configuration conf, TableName tableName, String family, @@ -224,9 +219,10 @@ private static void validateTable(Configuration conf, TableName tableName, Strin List kvs = res.listCells(); assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); - assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. verified = true; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java index b8b6bb811558..01f80e94d8c4 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithTTLs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -55,7 +55,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithTTLs implements Configurable { @ClassRule @@ -67,8 +67,7 @@ public class TestImportTSVWithTTLs implements Configurable { protected static HBaseTestingUtility util = new HBaseTestingUtility(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. */ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -114,8 +113,7 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_TTL", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001b1000000\n"; @@ -131,8 +129,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, S // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = fs + .makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); op.write(Bytes.toBytes(data)); op.close(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java index cad5b49bd6c8..5de93c282a5d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTSVWithVisibilityLabels.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -75,7 +75,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestImportTSVWithVisibilityLabels implements Configurable { @ClassRule @@ -88,8 +88,7 @@ public class TestImportTSVWithVisibilityLabels implements Configurable { protected static HBaseTestingUtility util = new HBaseTestingUtility(); /** - * Delete the tmp directory after running doMROnTableTest. Boolean. Default is - * false. + * Delete the tmp directory after running doMROnTableTest. Boolean. Default is false. 
*/ protected static final String DELETE_AFTER_LOAD_CONF = NAME + ".deleteAfterLoad"; @@ -124,10 +123,10 @@ public void setConf(Configuration conf) { public static void provisionCluster() throws Exception { conf = util.getConfiguration(); SUPERUSER = User.createUserForTesting(conf, "admin", new String[] { "supergroup" }); - conf.set("hbase.superuser", "admin,"+User.getCurrent().getName()); + conf.set("hbase.superuser", "admin," + User.getCurrent().getName()); VisibilityTestUtil.enableVisiblityLabels(conf); conf.setClass(VisibilityUtils.VISIBILITY_LABEL_GENERATOR_CLASS, SimpleScanLabelGenerator.class, - ScanLabelGenerator.class); + ScanLabelGenerator.class); util.startMiniCluster(); // Wait for the labels table to become available util.waitTableEnabled(VisibilityConstants.LABELS_TABLE_NAME.getName(), 50000); @@ -137,19 +136,19 @@ public static void provisionCluster() throws Exception { private static void createLabels() throws IOException, InterruptedException { PrivilegedExceptionAction action = new PrivilegedExceptionAction() { - @Override - public VisibilityLabelsResponse run() throws Exception { - String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; - try (Connection conn = ConnectionFactory.createConnection(conf)) { - VisibilityClient.addLabels(conn, labels); - LOG.info("Added labels "); - } catch (Throwable t) { - LOG.error("Error in adding labels" , t); - throw new IOException(t); - } - return null; - } - }; + @Override + public VisibilityLabelsResponse run() throws Exception { + String[] labels = { SECRET, TOPSECRET, CONFIDENTIAL, PUBLIC, PRIVATE }; + try (Connection conn = ConnectionFactory.createConnection(conf)) { + VisibilityClient.addLabels(conn, labels); + LOG.info("Added labels "); + } catch (Throwable t) { + LOG.error("Error in adding labels", t); + throw new IOException(t); + } + return null; + } + }; SUPERUSER.runAs(action); } @@ -164,8 +163,7 @@ public void testMROnTable() throws Exception { // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; @@ -231,10 +229,8 @@ public void testMROnTableWithBulkload() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = new String[] { - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; String data = "KEY\u001bVALUE1\u001bVALUE2\u001bsecret&private\n"; util.createTable(tableName, FAMILY); @@ -246,18 +242,15 @@ public void testMROnTableWithBulkload() throws Exception { public void testBulkOutputWithTsvImporterTextMapper() throws Exception { final TableName table = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); String FAMILY = "FAM"; - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - table.getNameAsString() - }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + table.getNameAsString() }; String data = "KEY\u001bVALUE4\u001bVALUE8\u001bsecret&private\n"; doMROnTableTest(util, FAMILY, data, args, 4); util.deleteTable(table); @@ -269,8 +262,7 @@ public void testMRWithOutputFormat() throws Exception { Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. String[] args = new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", + "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterMapper", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; @@ -285,10 +277,9 @@ public void testBulkOutputWithInvalidLabels() throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. 
- String[] args = - new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = @@ -303,13 +294,12 @@ public void testBulkOutputWithTsvImporterTextMapperWithInvalidLabels() throws Ex final TableName tableName = TableName.valueOf(name.getMethodName() + util.getRandomUUID()); Path hfiles = new Path(util.getDataTestDirOnTestFS(tableName.getNameAsString()), "hfiles"); // Prepare the arguments required for the test. - String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B,HBASE_CELL_VISIBILITY", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", tableName.getNameAsString() }; // 2 Data rows, one with valid label and one with invalid label String data = @@ -325,27 +315,22 @@ protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, S } /** - * Run an ImportTsv job and perform basic validation on the results. Returns - * the ImportTsv Tool instance so that other tests can inspect it - * for further validation as necessary. This method is static to insure - * non-reliance on instance's util/conf facilities. - * - * @param args - * Any arguments to pass BEFORE inputFile path is appended. - * + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. + * @param args Any arguments to pass BEFORE inputFile path is appended. * @param expectedKVCount Expected KV count. pass -1 to skip the kvcount check - * * @return The Tool instance used to run the test. 
*/ protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data, - String[] args, int valueMultiplier,int expectedKVCount) throws Exception { + String[] args, int valueMultiplier, int expectedKVCount) throws Exception { TableName table = TableName.valueOf(args[args.length - 1]); Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified(new Path(util - .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = fs + .makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -380,10 +365,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, S } } LOG.debug("validating the table " + createdHFiles); - if (createdHFiles) - validateHFiles(fs, outputPath, family,expectedKVCount); - else - validateTable(conf, table, family, valueMultiplier); + if (createdHFiles) validateHFiles(fs, outputPath, family, expectedKVCount); + else validateTable(conf, table, family, valueMultiplier); if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) { LOG.debug("Deleting test subdirectory"); @@ -410,20 +393,21 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String cf = elements[elements.length - 1]; foundFamilies.add(cf); assertTrue(String.format( - "HFile ouput contains a column family (%s) not present in input families (%s)", cf, - configFamilies), configFamilies.contains(cf)); + "HFile ouput contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), - hfile.getLen() > 0); + hfile.getLen() > 0); if (expectedKVCount > -1) { actualKVCount += getKVCountFromHfile(fs, hfile.getPath()); } } } if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in output hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } @@ -443,7 +427,7 @@ private static void validateTable(Configuration conf, TableName tableName, Strin Scan scan = new Scan(); // Scan entire family. scan.addFamily(Bytes.toBytes(family)); - scan.setAuthorizations(new Authorizations("secret","private")); + scan.setAuthorizations(new Authorizations("secret", "private")); ResultScanner resScanner = table.getScanner(scan); Result[] next = resScanner.next(5); assertEquals(1, next.length); @@ -454,8 +438,8 @@ private static void validateTable(Configuration conf, TableName tableName, Strin assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), - Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. 
} verified = true; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java index 3eee930d0f74..1b466d54e211 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsv.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestImportTsv implements Configurable { @ClassRule @@ -143,11 +143,10 @@ public void testMROnTableWithTimestamp() throws Exception { } @Test - public void testMROnTableWithCustomMapper() - throws Exception { + public void testMROnTableWithCustomMapper() throws Exception { util.createTable(tn, FAMILY); args.put(ImportTsv.MAPPER_CONF_KEY, - "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); + "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper"); doMROnTableTest(null, 3); util.deleteTable(tn); @@ -189,39 +188,34 @@ public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception { @Test public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. 
- String[] args = - new String[] { - "-D" + ImportTsv.MAPPER_CONF_KEY - + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", - "-D" + ImportTsv.COLUMNS_CONF_KEY - + "=HBASE_ROW_KEY,FAM:A,FAM:B", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - tn.getNameAsString(), - INPUT_FILE - }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); - assertTrue(job.getReducerClass().equals(TextSortReducer.class)); - assertTrue(job.getMapOutputValueClass().equals(Text.class)); - return 0; - } - }, args)); + String[] args = new String[] { + "-D" + ImportTsv.MAPPER_CONF_KEY + + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", + "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + tn.getNameAsString(), INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); + assertTrue(job.getReducerClass().equals(TextSortReducer.class)); + assertTrue(job.getMapOutputValueClass().equals(Text.class)); + return 0; + } + }, args)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @Test public void testBulkOutputWithTsvImporterTextMapper() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.MAPPER_CONF_KEY, "org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, bulkOutputPath.toString()); String data = "KEY\u001bVALUE4\u001bVALUE8\n"; @@ -239,53 +233,49 @@ public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { conf.set(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); exception.expect(TableNotFoundException.class); assertEquals("running test job configuration failed.", 0, - ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { - @Override public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testMRWithoutAnExistingTable() throws Exception { - String[] args = - new String[] { tn.getNameAsString(), "/inputFile" }; + String[] args = new String[] { tn.getNameAsString(), "/inputFile" }; exception.expect(TableNotFoundException.class); - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - createSubmittableJob(getConf(), args); - return 0; - } - }, args)); + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new 
Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + createSubmittableJob(getConf(), args); + return 0; + } + }, args)); } @Test public void testJobConfigurationsWithDryMode() throws Exception { - Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()),"hfiles"); + Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); String INPUT_FILE = "InputFile1.csv"; // Prepare the arguments required for the test. - String[] argsArray = new String[] { - "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", - "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", - "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), - "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", - tn.getNameAsString(), - INPUT_FILE }; - assertEquals("running test job configuration failed.", 0, ToolRunner.run( - new Configuration(util.getConfiguration()), - new ImportTsv() { - @Override - public int run(String[] args) throws Exception { - Job job = createSubmittableJob(getConf(), args); - assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); - return 0; - } - }, argsArray)); + String[] argsArray = + new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", + "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", + "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), + "-D" + ImportTsv.DRY_RUN_CONF_KEY + "=true", tn.getNameAsString(), INPUT_FILE }; + assertEquals("running test job configuration failed.", 0, + ToolRunner.run(new Configuration(util.getConfiguration()), new ImportTsv() { + @Override + public int run(String[] args) throws Exception { + Job job = createSubmittableJob(getConf(), args); + assertTrue(job.getOutputFormatClass().equals(NullOutputFormat.class)); + return 0; + } + }, argsArray)); // Delete table created by createSubmittableJob. util.deleteTable(tn); } @@ -301,8 +291,7 @@ public void testDryModeWithoutBulkOutputAndTableExists() throws Exception { } /** - * If table is not present in non-bulk mode, dry run should fail just like - * normal mode. + * If table is not present in non-bulk mode, dry run should fail just like normal mode. */ @Test public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception { @@ -311,7 +300,8 @@ public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception doMROnTableTest(null, 1); } - @Test public void testDryModeWithBulkOutputAndTableExists() throws Exception { + @Test + public void testDryModeWithBulkOutputAndTableExists() throws Exception { util.createTable(tn, FAMILY); // Prepare the arguments required for the test. Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); @@ -324,12 +314,11 @@ public void testDryModeWithoutBulkOutputAndTableDoesNotExists() throws Exception } /** - * If table is not present in bulk mode and create.table is not set to yes, - * import should fail with TableNotFoundException. + * If table is not present in bulk mode and create.table is not set to yes, import should fail + * with TableNotFoundException. */ @Test - public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws - Exception { + public void testDryModeWithBulkOutputAndTableDoesNotExistsCreateTableSetToNo() throws Exception { // Prepare the arguments required for the test. 
Path hfiles = new Path(util.getDataTestDirOnTestFS(tn.getNameAsString()), "hfiles"); args.put(ImportTsv.BULK_OUTPUT_CONF_KEY, hfiles.toString()); @@ -382,31 +371,30 @@ public void testSkipEmptyColumns() throws Exception { } private Tool doMROnTableTest(String data, int valueMultiplier) throws Exception { - return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier,-1); + return doMROnTableTest(util, tn, FAMILY, data, args, valueMultiplier, -1); } - protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, - String family, String data, Map args) throws Exception { - return doMROnTableTest(util, table, family, data, args, 1,-1); + protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, String family, + String data, Map args) throws Exception { + return doMROnTableTest(util, table, family, data, args, 1, -1); } /** - * Run an ImportTsv job and perform basic validation on the results. - * Returns the ImportTsv Tool instance so that other tests can - * inspect it for further validation as necessary. This method is static to - * insure non-reliance on instance's util/conf facilities. + * Run an ImportTsv job and perform basic validation on the results. Returns the ImportTsv + * Tool instance so that other tests can inspect it for further validation as + * necessary. This method is static to insure non-reliance on instance's util/conf facilities. * @param args Any arguments to pass BEFORE inputFile path is appended. * @return The Tool instance used to run the test. */ - protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, - String family, String data, Map args, int valueMultiplier,int expectedKVCount) - throws Exception { + protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, String family, + String data, Map args, int valueMultiplier, int expectedKVCount) + throws Exception { Configuration conf = new Configuration(util.getConfiguration()); // populate input file FileSystem fs = FileSystem.get(conf); - Path inputPath = fs.makeQualified( - new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); + Path inputPath = fs + .makeQualified(new Path(util.getDataTestDirOnTestFS(table.getNameAsString()), "input.dat")); FSDataOutputStream op = fs.create(inputPath, true); if (data == null) { data = "KEY\u001bVALUE1\u001bVALUE2\n"; @@ -440,15 +428,14 @@ protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, // Perform basic validation. If the input args did not include // ImportTsv.BULK_OUTPUT_CONF_KEY then validate data in the table. // Otherwise, validate presence of hfiles. 
- boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) && - "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); + boolean isDryRun = args.containsKey(ImportTsv.DRY_RUN_CONF_KEY) + && "true".equalsIgnoreCase(args.get(ImportTsv.DRY_RUN_CONF_KEY)); if (args.containsKey(ImportTsv.BULK_OUTPUT_CONF_KEY)) { if (isDryRun) { assertFalse(String.format("Dry run mode, %s should not have been created.", - ImportTsv.BULK_OUTPUT_CONF_KEY), - fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); + ImportTsv.BULK_OUTPUT_CONF_KEY), fs.exists(new Path(ImportTsv.BULK_OUTPUT_CONF_KEY))); } else { - validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family,expectedKVCount); + validateHFiles(fs, args.get(ImportTsv.BULK_OUTPUT_CONF_KEY), family, expectedKVCount); } } else { validateTable(conf, table, family, valueMultiplier, isDryRun); @@ -464,8 +451,8 @@ protected static Tool doMROnTableTest(HBaseTestingUtility util, TableName table, /** * Confirm ImportTsv via data in online table. */ - private static void validateTable(Configuration conf, TableName tableName, - String family, int valueMultiplier, boolean isDryRun) throws IOException { + private static void validateTable(Configuration conf, TableName tableName, String family, + int valueMultiplier, boolean isDryRun) throws IOException { LOG.debug("Validating table."); Connection connection = ConnectionFactory.createConnection(conf); @@ -487,7 +474,8 @@ private static void validateTable(Configuration conf, TableName tableName, assertTrue(CellUtil.matchingRows(kvs.get(0), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingRows(kvs.get(1), Bytes.toBytes("KEY"))); assertTrue(CellUtil.matchingValue(kvs.get(0), Bytes.toBytes("VALUE" + valueMultiplier))); - assertTrue(CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); + assertTrue( + CellUtil.matchingValue(kvs.get(1), Bytes.toBytes("VALUE" + 2 * valueMultiplier))); // Only one result set is expected, so let it loop. 
} if (isDryRun) { @@ -527,14 +515,11 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami String[] elements = cfStatus.getPath().toString().split(Path.SEPARATOR); String cf = elements[elements.length - 1]; foundFamilies.add(cf); - assertTrue( - String.format( - "HFile output contains a column family (%s) not present in input families (%s)", - cf, configFamilies), - configFamilies.contains(cf)); + assertTrue(String.format( + "HFile output contains a column family (%s) not present in input families (%s)", cf, + configFamilies), configFamilies.contains(cf)); for (FileStatus hfile : fs.listStatus(cfStatus.getPath())) { - assertTrue( - String.format("HFile %s appears to contain no data.", hfile.getPath()), + assertTrue(String.format("HFile %s appears to contain no data.", hfile.getPath()), hfile.getLen() > 0); // count the number of KVs from all the hfiles if (expectedKVCount > -1) { @@ -543,11 +528,12 @@ private static void validateHFiles(FileSystem fs, String outputPath, String fami } } assertTrue(String.format("HFile output does not contain the input family '%s'.", family), - foundFamilies.contains(family)); + foundFamilies.contains(family)); if (expectedKVCount > -1) { - assertTrue(String.format( - "KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", actualKVCount, - expectedKVCount), actualKVCount == expectedKVCount); + assertTrue( + String.format("KV count in ouput hfile=<%d> doesn't match with expected KV count=<%d>", + actualKVCount, expectedKVCount), + actualKVCount == expectedKVCount); } } @@ -571,4 +557,3 @@ private static int getKVCountFromHfile(FileSystem fs, Path p) throws IOException return count; } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java index a0d1cf7b6cf9..573cf3eee7d7 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestImportTsvParser.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ /** * Tests for {@link TsvParser}. 
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestImportTsvParser { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -58,7 +58,7 @@ private void checkParsing(ParsedLine parsed, Iterable expected) { ArrayList parsedCols = new ArrayList<>(); for (int i = 0; i < parsed.getColumnCount(); i++) { parsedCols.add(Bytes.toString(parsed.getLineBytes(), parsed.getColumnOffset(i), - parsed.getColumnLength(i))); + parsed.getColumnLength(i))); } if (!Iterables.elementsEqual(parsedCols, expected)) { fail("Expected: " + Joiner.on(",").join(expected) + "\n" + "Got:" @@ -293,7 +293,7 @@ public void testTsvParseAttributesKey() throws BadTsvLineException { assertEquals(6, parse.getAttributeKeyOffset()); String[] attr = parse.getIndividualAttributes(); int i = 0; - for (String str : attr) { + for (String str : attr) { assertEquals(("key" + i + "=>" + "value" + i), str); i++; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java index d1f48bb299ed..c0ff107df973 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestJarFinder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,7 +51,7 @@ public class TestJarFinder { @Test public void testJar() throws Exception { - //picking a class that is for sure in a JAR in the classpath + // picking a class that is for sure in a JAR in the classpath String jar = JarFinder.getJar(LoggerFactory.class); Assert.assertTrue(new File(jar).exists()); } @@ -59,8 +59,7 @@ public void testJar() throws Exception { private static void delete(File file) throws IOException { if (file.getAbsolutePath().length() < 5) { throw new IllegalArgumentException( - MessageFormat.format("Path [{0}] is too short, not deleting", - file.getAbsolutePath())); + MessageFormat.format("Path [{0}] is too short, not deleting", file.getAbsolutePath())); } if (file.exists()) { if (file.isDirectory()) { @@ -73,16 +72,15 @@ private static void delete(File file) throws IOException { } if (!file.delete()) { throw new RuntimeException( - MessageFormat.format("Could not delete path [{0}]", - file.getAbsolutePath())); + MessageFormat.format("Could not delete path [{0}]", file.getAbsolutePath())); } } } @Test public void testExpandedClasspath() throws Exception { - //picking a class that is for sure in a directory in the classpath - //in this case the JAR is created on the fly + // picking a class that is for sure in a directory in the classpath + // in this case the JAR is created on the fly String jar = JarFinder.getJar(TestJarFinder.class); Assert.assertTrue(new File(jar).exists()); } @@ -90,7 +88,7 @@ public void testExpandedClasspath() throws Exception { @Test public void testExistingManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testExistingManifest"); + TestJarFinder.class.getName() + "-testExistingManifest"); delete(dir); dir.mkdirs(); @@ -109,8 +107,7 @@ public void testExistingManifest() throws Exception { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); 
JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } @@ -118,7 +115,7 @@ public void testExistingManifest() throws Exception { @Test public void testNoManifest() throws Exception { File dir = new File(System.getProperty("test.build.dir", "target/test-dir"), - TestJarFinder.class.getName() + "-testNoManifest"); + TestJarFinder.class.getName() + "-testNoManifest"); delete(dir); dir.mkdirs(); File propsFile = new File(dir, "props.properties"); @@ -128,8 +125,7 @@ public void testNoManifest() throws Exception { ByteArrayOutputStream baos = new ByteArrayOutputStream(); JarOutputStream zos = new JarOutputStream(baos); JarFinder.jarDir(dir, "", zos); - JarInputStream jis = - new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); + JarInputStream jis = new JarInputStream(new ByteArrayInputStream(baos.toByteArray())); Assert.assertNotNull(jis.getManifest()); jis.close(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java index b4a53ea65e82..11fc16c04a54 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,11 +30,10 @@ import org.junit.experimental.categories.Category; /** - * Tests various scan start and stop row scenarios. This is set in a scan and - * tested in a MapReduce job to see if that is handed over and done properly - * too. + * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce + * job to see if that is handed over and done properly too. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase { @ClassRule @@ -44,11 +43,11 @@ public class TestMultiTableInputFormat extends MultiTableInputFormatTestBase { @BeforeClass public static void setupLogging() { TEST_UTIL.enableDebug(MultiTableInputFormat.class); - } + } - @Override + @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job); + TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class, ImmutableBytesWritable.class, + ImmutableBytesWritable.class, job); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java index 906abca05c9d..2a77b2a7c902 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -66,20 +66,20 @@ /** * Tests of MultiTableInputFormatBase. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestMultiTableInputFormatBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestMultiTableInputFormatBase.class); - @Rule public final TestName name = new TestName(); + @Rule + public final TestName name = new TestName(); /** - * Test getSplits only puts up one Connection. - * In past it has put up many Connections. Each Connection setup comes with a fresh new cache - * so we have to do fresh hit on hbase:meta. Should only do one Connection when doing getSplits - * even if a MultiTableInputFormat. + * Test getSplits only puts up one Connection. In past it has put up many Connections. Each + * Connection setup comes with a fresh new cache so we have to do fresh hit on hbase:meta. Should + * only do one Connection when doing getSplits even if a MultiTableInputFormat. * @throws IOException */ @Test @@ -88,8 +88,7 @@ public void testMRSplitsConnectionCount() throws IOException { MultiTableInputFormatBase mtif = new MultiTableInputFormatBase() { @Override public RecordReader createRecordReader(InputSplit split, - TaskAttemptContext context) - throws IOException, InterruptedException { + TaskAttemptContext context) throws IOException, InterruptedException { return super.createRecordReader(split, context); } }; @@ -124,7 +123,7 @@ public static class MRSplitsConnection implements Connection { private final Configuration configuration; static final AtomicInteger creations = new AtomicInteger(0); - MRSplitsConnection (Configuration conf, ExecutorService pool, User user) throws IOException { + MRSplitsConnection(Configuration conf, ExecutorService pool, User user) throws IOException { this.configuration = conf; creations.incrementAndGet(); } @@ -157,31 +156,25 @@ public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws I @Override public RegionLocator getRegionLocator(final TableName tableName) throws IOException { // Make up array of start keys. We start off w/ empty byte array. - final byte [][] startKeys = new byte [][] {HConstants.EMPTY_BYTE_ARRAY, - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz")}; + final byte[][] startKeys = new byte[][] { HConstants.EMPTY_BYTE_ARRAY, Bytes.toBytes("aaaa"), + Bytes.toBytes("bbb"), Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), + Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), + Bytes.toBytes("lll"), Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), + Bytes.toBytes("ppp"), Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), + Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("zzz") }; // Make an array of end keys. We end with the empty byte array. 
- final byte [][] endKeys = new byte[][] { - Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), - Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), - Bytes.toBytes("fff"), Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), - Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"), - Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), - Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), - Bytes.toBytes("ttt"), Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), - Bytes.toBytes("zzz"), - HConstants.EMPTY_BYTE_ARRAY}; + final byte[][] endKeys = new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), + Bytes.toBytes("ccc"), Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), + Bytes.toBytes("ggg"), Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), + Bytes.toBytes("mmm"), Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), + Bytes.toBytes("qqq"), Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), + Bytes.toBytes("uuu"), Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), + HConstants.EMPTY_BYTE_ARRAY }; // Now make a map of start keys to HRegionLocations. Let the server namber derive from // the start key. - final Map map = - new TreeMap(Bytes.BYTES_COMPARATOR); - for (byte [] startKey: startKeys) { + final Map map = + new TreeMap(Bytes.BYTES_COMPARATOR); + for (byte[] startKey : startKeys) { HRegionLocation hrl = new HRegionLocation( RegionInfoBuilder.newBuilder(tableName).setStartKey(startKey).build(), ServerName.valueOf(Bytes.toString(startKey), 0, 0)); @@ -191,19 +184,20 @@ public RegionLocator getRegionLocator(final TableName tableName) throws IOExcept final List locations = new ArrayList(map.values()); // Now make a RegionLocator mock backed by the abpve map and list of locations. RegionLocator mockedRegionLocator = Mockito.mock(RegionLocator.class); - Mockito.when(mockedRegionLocator.getRegionLocation(Mockito.any(byte [].class), - Mockito.anyBoolean())). - thenAnswer(new Answer() { + Mockito + .when( + mockedRegionLocator.getRegionLocation(Mockito.any(byte[].class), Mockito.anyBoolean())) + .thenAnswer(new Answer() { @Override public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; return map.get(key); } }); Mockito.when(mockedRegionLocator.getAllRegionLocations()).thenReturn(locations); - Mockito.when(mockedRegionLocator.getStartEndKeys()). - thenReturn(new Pair(startKeys, endKeys)); + Mockito.when(mockedRegionLocator.getStartEndKeys()) + .thenReturn(new Pair(startKeys, endKeys)); Mockito.when(mockedRegionLocator.getName()).thenReturn(tableName); return mockedRegionLocator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java index 89e7b49eb695..5ece32067795 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -72,9 +72,9 @@ public void setUp() throws Exception { @Override protected void initJob(List scans, Job job) throws IOException { - TableMapReduceUtil - .initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), ScanMapper.class, - ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, restoreDir); + TableMapReduceUtil.initMultiTableSnapshotMapperJob(getSnapshotScanMapping(scans), + ScanMapper.class, ImmutableBytesWritable.class, ImmutableBytesWritable.class, job, true, + restoreDir); } protected Map> getSnapshotScanMapping(final List scans) { @@ -83,7 +83,7 @@ protected Map> getSnapshotScanMapping(final List @Override public String apply(Scan input) { return snapshotNameForTable( - Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); + Bytes.toStringBinary(input.getAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME))); } }).asMap(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java index 49f6ed41b313..9db8a0338428 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultiTableSnapshotInputFormatImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,17 +71,15 @@ public void setUp() throws Exception { // feels weird to introduce a RestoreSnapshotHelperFactory and inject that, which would // probably be the more "pure" // way of doing things. This is the lesser of two evils, perhaps? - doNothing().when(this.subject). - restoreSnapshot(any(), any(), any(), - any(), any()); + doNothing().when(this.subject).restoreSnapshot(any(), any(), any(), any(), any()); this.conf = new Configuration(); this.rootDir = new Path("file:///test-root-dir"); CommonFSUtils.setRootDir(conf, rootDir); - this.snapshotScans = ImmutableMap.>of("snapshot1", - ImmutableList.of(new Scan(Bytes.toBytes("1"), Bytes.toBytes("2"))), "snapshot2", - ImmutableList.of(new Scan(Bytes.toBytes("3"), Bytes.toBytes("4")), - new Scan(Bytes.toBytes("5"), Bytes.toBytes("6")))); + this.snapshotScans = ImmutableMap.> of("snapshot1", + ImmutableList.of(new Scan(Bytes.toBytes("1"), Bytes.toBytes("2"))), "snapshot2", + ImmutableList.of(new Scan(Bytes.toBytes("3"), Bytes.toBytes("4")), + new Scan(Bytes.toBytes("5"), Bytes.toBytes("6")))); this.restoreDir = new Path(CommonFSUtils.getRootDir(conf), "restore-dir"); @@ -91,8 +89,8 @@ public void callSetInput() throws IOException { subject.setInput(this.conf, snapshotScans, restoreDir); } - public Map> toScanWithEquals( - Map> snapshotScans) throws IOException { + public Map> + toScanWithEquals(Map> snapshotScans) throws IOException { Map> rtn = Maps.newHashMap(); for (Map.Entry> entry : snapshotScans.entrySet()) { @@ -114,7 +112,6 @@ public static class ScanWithEquals { /** * Creates a new instance of this class while copying all values. - * * @param scan The scan instance to copy from. * @throws java.io.IOException When copying the values fails. 
*/ @@ -129,8 +126,8 @@ public boolean equals(Object obj) { return false; } ScanWithEquals otherScan = (ScanWithEquals) obj; - return Objects.equals(this.startRow, otherScan.startRow) && Objects - .equals(this.stopRow, otherScan.stopRow); + return Objects.equals(this.startRow, otherScan.startRow) + && Objects.equals(this.stopRow, otherScan.stopRow); } @Override @@ -140,9 +137,8 @@ public int hashCode() { @Override public String toString() { - return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects. - toStringHelper(this).add("startRow", startRow) - .add("stopRow", stopRow).toString(); + return org.apache.hbase.thirdparty.com.google.common.base.MoreObjects.toStringHelper(this) + .add("startRow", startRow).add("stopRow", stopRow).toString(); } } @@ -177,7 +173,7 @@ public void testSetInputCreatesRestoreDirectoriesUnderRootRestoreDir() throws Ex for (Path snapshotDir : restoreDirs.values()) { assertEquals("Expected " + snapshotDir + " to be a child of " + restoreDir, restoreDir, - snapshotDir.getParent()); + snapshotDir.getParent()); } } @@ -189,7 +185,7 @@ public void testSetInputRestoresSnapshots() throws Exception { for (Map.Entry entry : snapshotDirs.entrySet()) { verify(this.subject).restoreSnapshot(eq(this.conf), eq(entry.getKey()), eq(this.rootDir), - eq(entry.getValue()), any()); + eq(entry.getValue()), any()); } } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java index dacc15c74b20..a31c0a262728 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestMultithreadedTableMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,11 +53,11 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestMultithreadedTableMapper { @ClassRule @@ -65,21 +65,19 @@ public class TestMultithreadedTableMapper { HBaseClassTestRule.forClass(TestMultithreadedTableMapper.class); private static final Logger LOG = LoggerFactory.getLogger(TestMultithreadedTableMapper.class); - private static final HBaseTestingUtility UTIL = - new HBaseTestingUtility(); + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); static final TableName MULTI_REGION_TABLE_NAME = TableName.valueOf("mrtest"); static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - static final int NUMBER_OF_THREADS = 10; + static final int NUMBER_OF_THREADS = 10; @BeforeClass public static void beforeClass() throws Exception { // Up the handlers; this test needs more than usual. 
UTIL.getConfiguration().setInt(HConstants.REGION_SERVER_HIGH_PRIORITY_HANDLER_COUNT, 10); UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.waitUntilAllRegionsAssigned(MULTI_REGION_TABLE_NAME); } @@ -92,29 +90,25 @@ public static void afterClass() throws Exception { /** * Pass the given key and processed record reduce */ - public static class ProcessContentsMapper - extends TableMapper { + public static class ProcessContentsMapper extends TableMapper { /** * Pass the key, and reversed value to reduce - * * @param key * @param value * @param context * @throws IOException */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) - throws IOException, InterruptedException { + public void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it String originalValue = Bytes.toString(value.getValue(INPUT_FAMILY, INPUT_FAMILY)); @@ -148,15 +142,12 @@ private void runTestOnTable(Table table) job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName(), scan, - MultithreadedTableMapper.class, ImmutableBytesWritable.class, - Put.class, job); + TableMapReduceUtil.initTableMapperJob(table.getName(), scan, MultithreadedTableMapper.class, + ImmutableBytesWritable.class, Put.class, job); MultithreadedTableMapper.setMapperClass(job, ProcessContentsMapper.class); MultithreadedTableMapper.setNumberOfThreads(job, NUMBER_OF_THREADS); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), - IdentityTableReducer.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), + IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName()); assertTrue(job.waitForCompletion(true)); @@ -166,8 +157,7 @@ private void runTestOnTable(Table table) } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -184,8 +174,8 @@ private void verify(TableName tableName) throws IOException { verified = true; break; } catch (NullPointerException e) { - // If here, a cell was empty. Presume its because updates came in - // after the scanner had been opened. Wait a while and retry. + // If here, a cell was empty. Presume its because updates came in + // after the scanner had been opened. Wait a while and retry. LOG.debug("Verification attempt failed: " + e.getMessage()); } try { @@ -199,15 +189,13 @@ private void verify(TableName tableName) throws IOException { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. 
- * + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. * @param table Table to scan. * @throws IOException * @throws NullPointerException if we failed to find a cell value */ - private void verifyAttempt(final Table table) - throws IOException, NullPointerException { + private void verifyAttempt(final Table table) throws IOException, NullPointerException { Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); scan.addFamily(OUTPUT_FAMILY); @@ -215,37 +203,34 @@ private void verifyAttempt(final Table table) try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (LOG.isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); - }else if (count == 1) { + } else if (count == 1) { secondValue = CellUtil.cloneValue(kv); - }else if (count == 2) { + } else if (count == 2) { break; } count++; } String first = ""; if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } first = Bytes.toString(firstValue); String second = ""; if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -254,9 +239,9 @@ private void verifyAttempt(final Table table) second = Bytes.toString(secondReversed); if (first.compareTo(second) != 0) { if (LOG.isDebugEnabled()) { - LOG.debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + LOG.debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } @@ -267,4 +252,3 @@ private void verifyAttempt(final Table table) } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java index 85d0f1c8ddd0..5abc088912ee 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRegionSizeCalculator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestRegionSizeCalculator { @ClassRule @@ -50,19 +50,16 @@ public class TestRegionSizeCalculator { private Configuration configuration = new Configuration(); private final long megabyte = 1024L * 1024L; - private final ServerName sn = ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, - ServerName.NON_STARTCODE); + private final ServerName sn = + ServerName.valueOf("local-rs", DEFAULT_REGIONSERVER_PORT, ServerName.NON_STARTCODE); @Test public void testSimpleTestCase() throws Exception { RegionLocator regionLocator = mockRegionLocator("region1", "region2", "region3"); - Admin admin = mockAdmin( - mockRegion("region1", 123), - mockRegion("region3", 1232), - mockRegion("region2", 54321) - ); + Admin admin = mockAdmin(mockRegion("region1", 123), mockRegion("region3", 1232), + mockRegion("region2", 54321)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); @@ -75,40 +72,36 @@ public void testSimpleTestCase() throws Exception { assertEquals(3, calculator.getRegionSizeMap().size()); } - /** - * When size of region in megabytes is larger than largest possible integer there could be - * error caused by lost of precision. - * */ + * When size of region in megabytes is larger than largest possible integer there could be error + * caused by lost of precision. + */ @Test public void testLargeRegion() throws Exception { RegionLocator regionLocator = mockRegionLocator("largeRegion"); - Admin admin = mockAdmin( - mockRegion("largeRegion", Integer.MAX_VALUE) - ); + Admin admin = mockAdmin(mockRegion("largeRegion", Integer.MAX_VALUE)); RegionSizeCalculator calculator = new RegionSizeCalculator(regionLocator, admin); - assertEquals(((long) Integer.MAX_VALUE) * megabyte, calculator.getRegionSize("largeRegion".getBytes())); + assertEquals(((long) Integer.MAX_VALUE) * megabyte, + calculator.getRegionSize("largeRegion".getBytes())); } - /** When calculator is disabled, it should return 0 for each request.*/ + /** When calculator is disabled, it should return 0 for each request. */ @Test public void testDisabled() throws Exception { String regionName = "cz.goout:/index.html"; RegionLocator table = mockRegionLocator(regionName); - Admin admin = mockAdmin( - mockRegion(regionName, 999) - ); + Admin admin = mockAdmin(mockRegion(regionName, 999)); - //first request on enabled calculator + // first request on enabled calculator RegionSizeCalculator calculator = new RegionSizeCalculator(table, admin); assertEquals(999 * megabyte, calculator.getRegionSize(regionName.getBytes())); - //then disabled calculator. + // then disabled calculator. configuration.setBoolean(RegionSizeCalculator.ENABLE_REGIONSIZECALCULATOR, false); RegionSizeCalculator disabledCalculator = new RegionSizeCalculator(table, admin); assertEquals(0 * megabyte, disabledCalculator.getRegionSize(regionName.getBytes())); @@ -118,7 +111,7 @@ public void testDisabled() throws Exception { /** * Makes some table with given region names. - * */ + */ private RegionLocator mockRegionLocator(String... regionNames) throws IOException { RegionLocator mockedTable = Mockito.mock(RegionLocator.class); when(mockedTable.getName()).thenReturn(TableName.valueOf("sizeTestTable")); @@ -136,7 +129,7 @@ private RegionLocator mockRegionLocator(String... 
regionNames) throws IOExceptio /** * Creates mock returning RegionLoad info about given servers. - */ + */ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { Admin mockAdmin = Mockito.mock(Admin.class); List regionLoads = new ArrayList<>(); @@ -151,9 +144,8 @@ private Admin mockAdmin(RegionMetrics... regionLoadArray) throws Exception { /** * Creates mock of region with given name and size. - * - * @param fileSizeMb number of megabytes occupied by region in file store in megabytes - * */ + * @param fileSizeMb number of megabytes occupied by region in file store in megabytes + */ private RegionMetrics mockRegion(String regionName, int fileSizeMb) { RegionMetrics region = Mockito.mock(RegionMetrics.class); when(region.getRegionName()).thenReturn(regionName.getBytes()); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java index c3abf4d544e0..71acc0c11557 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRoundRobinTableInputFormat.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -43,25 +44,17 @@ /** * Basic test of {@link RoundRobinTableInputFormat}; i.e. RRTIF. */ -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestRoundRobinTableInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestRoundRobinTableInputFormat.class); private static final int SERVERS_COUNT = 5; - private static final String[] KEYS = { - "aa", "ab", "ac", "ad", "ae", - "ba", "bb", "bc", "bd", "be", - "ca", "cb", "cc", "cd", "ce", - "da", "db", "dc", "dd", "de", - "ea", "eb", "ec", "ed", "ee", - "fa", "fb", "fc", "fd", "fe", - "ga", "gb", "gc", "gd", "ge", - "ha", "hb", "hc", "hd", "he", - "ia", "ib", "ic", "id", "ie", - "ja", "jb", "jc", "jd", "je", "jf" - }; + private static final String[] KEYS = { "aa", "ab", "ac", "ad", "ae", "ba", "bb", "bc", "bd", "be", + "ca", "cb", "cc", "cd", "ce", "da", "db", "dc", "dd", "de", "ea", "eb", "ec", "ed", "ee", + "fa", "fb", "fc", "fd", "fe", "ga", "gb", "gc", "gd", "ge", "ha", "hb", "hc", "hd", "he", + "ia", "ib", "ic", "id", "ie", "ja", "jb", "jc", "jd", "je", "jf" }; /** * Test default behavior. @@ -78,8 +71,8 @@ public void testRoundRobinSplit() throws IOException, InterruptedException { Arrays.sort(copy.toArray(new InputSplit[0]), new SplitComparator()); // Assert the sort is retained even after passing through SplitComparator. 
for (int i = 0; i < sortedSplits.size(); i++) { - TableSplit sortedTs = (TableSplit)sortedSplits.get(i); - TableSplit copyTs = (TableSplit)copy.get(i); + TableSplit sortedTs = (TableSplit) sortedSplits.get(i); + TableSplit copyTs = (TableSplit) copy.get(i); assertEquals(sortedTs.getEncodedRegionName(), copyTs.getEncodedRegionName()); } } @@ -90,17 +83,17 @@ public void testRoundRobinSplit() throws IOException, InterruptedException { private List createSplits() { List splits = new ArrayList<>(KEYS.length - 1); for (int i = 0; i < KEYS.length - 1; i++) { - InputSplit split = new TableSplit(TableName.valueOf("test"), new Scan(), - Bytes.toBytes(KEYS[i]), Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), - "", 0); + InputSplit split = + new TableSplit(TableName.valueOf("test"), new Scan(), Bytes.toBytes(KEYS[i]), + Bytes.toBytes(KEYS[i + 1]), String.valueOf(i % SERVERS_COUNT + 1), "", 0); splits.add(split); } return splits; } private void testDistribution(List list) throws IOException, InterruptedException { - for (int i = 0; i < KEYS.length/SERVERS_COUNT; i++) { - int [] counts = new int[SERVERS_COUNT]; + for (int i = 0; i < KEYS.length / SERVERS_COUNT; i++) { + int[] counts = new int[SERVERS_COUNT]; for (int j = i * SERVERS_COUNT; j < i * SERVERS_COUNT + SERVERS_COUNT; j++) { counts[Integer.parseInt(list.get(j).getLocations()[0]) - 1]++; } @@ -120,21 +113,21 @@ private static class SplitComparator implements Comparator { public int compare(InputSplit o1, InputSplit o2) { try { return Long.compare(o1.getLength(), o2.getLength()); - } catch (IOException|InterruptedException e) { + } catch (IOException | InterruptedException e) { throw new RuntimeException("exception in compare", e); } } } /** - * Assert that lengths are descending. RRTIF writes lengths in descending order so any - * subsequent sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps - * our RRTIF ordering. + * Assert that lengths are descending. RRTIF writes lengths in descending order so any subsequent + * sort using dump SplitComparator as is done in JobSubmitter up in Hadoop keeps our RRTIF + * ordering. */ private void assertLengthDescending(List list) - throws IOException, InterruptedException { + throws IOException, InterruptedException { long previousLength = Long.MAX_VALUE; - for (InputSplit is: list) { + for (InputSplit is : list) { long length = is.getLength(); assertTrue(previousLength + " " + length, previousLength > length); previousLength = length; @@ -166,12 +159,12 @@ public void testConfigureUnconfigure() { private void checkRetainsBooleanValue(JobContext jobContext, RoundRobinTableInputFormat rrtif, final boolean b) { - jobContext.getConfiguration(). - setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); + jobContext.getConfiguration() + .setBoolean(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE, b); rrtif.configure(); rrtif.unconfigure(); - String value = jobContext.getConfiguration(). 
- get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); + String value = jobContext.getConfiguration() + .get(RoundRobinTableInputFormat.HBASE_REGIONSIZECALCULATOR_ENABLE); assertEquals(b, Boolean.valueOf(value)); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java index 5793dfad5880..47f8df07f490 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestRowCounter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -26,7 +26,6 @@ import java.io.PrintStream; import java.util.ArrayList; import java.util.Arrays; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; import org.apache.hadoop.hbase.TableName; @@ -50,12 +49,12 @@ /** * Test the rowcounter map reduce job. */ -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestRowCounter { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRowCounter.class); + HBaseClassTestRule.forClass(TestRowCounter.class); private static final Logger LOG = LoggerFactory.getLogger(TestRowCounter.class); private final static HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); @@ -89,71 +88,54 @@ public static void tearDownAfterClass() throws Exception { /** * Test a case when no column was specified in command line arguments. - * * @throws Exception */ @Test public void testRowCounterNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runRowCount(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * + * Test a case when the column specified in command line arguments is exclusive for few rows. * @throws Exception */ @Test public void testRowCounterExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. * @throws Exception */ @Test public void testRowCounterColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runRowCount(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. 
* @throws Exception */ @Test public void testRowCounterHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runRowCount(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified * @throws Exception */ @Test public void testRowCounterColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runRowCount(args, 8); } @@ -163,9 +145,7 @@ public void testRowCounterColumnAndRowRange() throws Exception { */ @Test public void testRowCounterRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runRowCount(args, 2); } @@ -175,9 +155,7 @@ public void testRowCounterRowSingleRange() throws Exception { */ @Test public void testRowCounterRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runRowCount(args, 3); } @@ -187,9 +165,7 @@ public void testRowCounterRowSingleRangeUpperBound() throws Exception { */ @Test public void testRowCounterRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runRowCount(args, 5); } @@ -199,22 +175,18 @@ public void testRowCounterRowMultiRangeUpperBound() throws Exception { */ @Test public void testRowCounterRowMultiRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runRowCount(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not * @throws Exception */ @Test public void testRowCounterRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runRowCount(args, 2); } @@ -222,19 +194,16 @@ public void testRowCounterRowMultiEmptyRange() throws Exception { public void testRowCounter10kRowRange() throws Exception { String tableName = TABLE_NAME + "10k"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runRowCount(args, 3); } /** * Test a case when the timerange is specified with --starttime and --endtime options - * * 
@throws Exception */ @Test @@ -248,7 +217,8 @@ public void testRowCounterTimeRange() throws Exception { long ts; // clean up content of TABLE_NAME - Table table = TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); + Table table = + TEST_UTIL.createTable(TableName.valueOf(TABLE_NAME_TS_RANGE), Bytes.toBytes(COL_FAM)); ts = EnvironmentEdgeManager.currentTime(); put1.addColumn(family, col1, ts, Bytes.toBytes("val1")); @@ -262,38 +232,25 @@ public void testRowCounterTimeRange() throws Exception { table.put(put3); table.close(); - String[] args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + ts }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runRowCount(args, 1); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runRowCount(args, 2); - args = new String[] { - TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { TABLE_NAME_TS_RANGE, COL_FAM + ":" + COL1, + "--starttime=" + (ts - 30 * 1000), "--endtime=" + (ts + 30 * 1000), }; runRowCount(args, 3); } /** * Run the RowCounter map reduce job and verify the row count. - * * @param args the command line arguments to be used for rowcounter job. * @param expectedCount the expected row count (result of map reduce job). * @throws Exception @@ -301,18 +258,17 @@ public void testRowCounterTimeRange() throws Exception { private void runRowCount(String[] args, int expectedCount) throws Exception { RowCounter rowCounter = new RowCounter(); rowCounter.setConf(TEST_UTIL.getConfiguration()); - args = Arrays.copyOf(args, args.length+1); - args[args.length-1]="--expectedCount=" + expectedCount; + args = Arrays.copyOf(args, args.length + 1); + args[args.length - 1] = "--expectedCount=" + expectedCount; long start = EnvironmentEdgeManager.currentTime(); int result = rowCounter.run(args); long duration = EnvironmentEdgeManager.currentTime() - start; LOG.debug("row count duration (ms): " + duration); - assertTrue(result==0); + assertTrue(result == 0); } /** * Run the RowCounter map reduce job and verify the row count. - * * @param args the command line arguments to be used for rowcounter job. * @param expectedCount the expected row count (result of map reduce job). * @throws Exception in case of any unexpected error. @@ -330,66 +286,50 @@ private void runCreateSubmittableJobWithArgs(String[] args, int expectedCount) t @Test public void testCreateSubmittableJobWithArgsNoColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME - }; + String[] args = new String[] { TABLE_NAME }; runCreateSubmittableJobWithArgs(args, 10); } /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows. - * + * Test a case when the column specified in command line arguments is exclusive for few rows. * @throws Exception in case of any unexpected error. 
*/ @Test public void testCreateSubmittableJobWithArgsExclusiveColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is - * one for which the qualifier contains colons. - * + * Test a case when the column specified in command line arguments is one for which the qualifier + * contains colons. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsColumnWithColonInQualifier() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COMPOSITE_COLUMN }; runCreateSubmittableJobWithArgs(args, 8); } /** - * Test a case when the column specified in command line arguments is not part - * of first KV for a row. - * + * Test a case when the column specified in command line arguments is not part of first KV for a + * row. * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsHiddenColumn() throws Exception { - String[] args = new String[] { - TABLE_NAME, COL_FAM + ":" + COL2 - }; + String[] args = new String[] { TABLE_NAME, COL_FAM + ":" + COL2 }; runCreateSubmittableJobWithArgs(args, 10); } - /** - * Test a case when the column specified in command line arguments is - * exclusive for few rows and also a row range filter is specified - * + * Test a case when the column specified in command line arguments is exclusive for few rows and + * also a row range filter is specified * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00rov,\\x00rox", COL_FAM + ":" + COL1 }; runCreateSubmittableJobWithArgs(args, 8); } @@ -399,9 +339,7 @@ public void testCreateSubmittableJobWithArgsColumnAndRowRange() throws Exception */ @Test public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -411,9 +349,7 @@ public void testCreateSubmittableJobWithArgsRowSingleRange() throws Exception { */ @Test public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3" }; runCreateSubmittableJobWithArgs(args, 3); } @@ -423,9 +359,7 @@ public void testCreateSubmittableJobWithArgsRowSingleRangeUpperBound() throws Ex */ @Test public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" - }; + String[] args = new String[] { TABLE_NAME, "--range=,\\x00row3;\\x00row5,\\x00row7" }; runCreateSubmittableJobWithArgs(args, 5); } @@ -435,22 +369,18 @@ public void testCreateSubmittableJobWithArgsRowMultiRangeUpperBound() throws Exc */ @Test public void testCreateSubmittableJobWithArgsRowMultiRange() throws Exception { - String[] args = new String[] { - 
TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;\\x00row5,\\x00row8" }; runCreateSubmittableJobWithArgs(args, 5); } /** - * Test a case when a range is specified with multiple ranges of start-end keys; - * one range is filled, another two are not + * Test a case when a range is specified with multiple ranges of start-end keys; one range is + * filled, another two are not * @throws Exception in case of any unexpected error. */ @Test public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exception { - String[] args = new String[] { - TABLE_NAME, "--range=\\x00row1,\\x00row3;;" - }; + String[] args = new String[] { TABLE_NAME, "--range=\\x00row1,\\x00row3;;" }; runCreateSubmittableJobWithArgs(args, 2); } @@ -458,19 +388,16 @@ public void testCreateSubmittableJobWithArgsRowMultiEmptyRange() throws Exceptio public void testCreateSubmittableJobWithArgs10kRowRange() throws Exception { String tableName = TABLE_NAME + "CreateSubmittableJobWithArgs10kRowRange"; - try (Table table = TEST_UTIL.createTable( - TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { + try ( + Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM))) { writeRows(table, 10000, 0); } - String[] args = new String[] { - tableName, "--range=\\x00row9872,\\x00row9875" - }; + String[] args = new String[] { tableName, "--range=\\x00row9872,\\x00row9875" }; runCreateSubmittableJobWithArgs(args, 3); } /** * Test a case when the timerange is specified with --starttime and --endtime options - * * @throws Exception in case of any unexpected error. */ @Test @@ -483,7 +410,7 @@ public void testCreateSubmittableJobWithArgsTimeRange() throws Exception { long ts; - String tableName = TABLE_NAME_TS_RANGE+"CreateSubmittableJobWithArgs"; + String tableName = TABLE_NAME_TS_RANGE + "CreateSubmittableJobWithArgs"; // clean up content of TABLE_NAME Table table = TEST_UTIL.createTable(TableName.valueOf(tableName), Bytes.toBytes(COL_FAM)); @@ -499,39 +426,26 @@ public void testCreateSubmittableJobWithArgsTimeRange() throws Exception { table.put(put3); table.close(); - String[] args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + ts - }; + String[] args = + new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, "--endtime=" + ts }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + 0, - "--endtime=" + (ts - 10) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + 0, + "--endtime=" + (ts - 10) }; runCreateSubmittableJobWithArgs(args, 1); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + ts, - "--endtime=" + (ts + 1000) - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + ts, + "--endtime=" + (ts + 1000) }; runCreateSubmittableJobWithArgs(args, 2); - args = new String[] { - tableName, COL_FAM + ":" + COL1, - "--starttime=" + (ts - 30 * 1000), - "--endtime=" + (ts + 30 * 1000), - }; + args = new String[] { tableName, COL_FAM + ":" + COL1, "--starttime=" + (ts - 30 * 1000), + "--endtime=" + (ts + 30 * 1000), }; runCreateSubmittableJobWithArgs(args, 3); } /** - * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have - * two columns, Few have one. - * + * Writes TOTAL_ROWS number of distinct rows in to the table. Few rows have two columns, Few have + * one. 
* @param table * @throws IOException */ @@ -570,7 +484,7 @@ private static void writeRows(Table table, int totalRows, int rowsWithOneCol) th @Test public void testImportMain() throws Exception { SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); String[] args = {}; try { @@ -602,13 +516,13 @@ public void testHelp() throws Exception { ByteArrayOutputStream data = new ByteArrayOutputStream(); PrintStream stream = new PrintStream(data); System.setOut(stream); - String[] args = {"-h"}; + String[] args = { "-h" }; runRowCount(args, 0); assertUsageContent(data.toString()); - args = new String[]{"--help"}; + args = new String[] { "--help" }; runRowCount(args, 0); assertUsageContent(data.toString()); - }finally { + } finally { System.setOut(oldPrintStream); } } @@ -616,27 +530,27 @@ public void testHelp() throws Exception { @Test public void testInvalidTable() throws Exception { try { - String[] args = {"invalid"}; + String[] args = { "invalid" }; runRowCount(args, 0); fail("RowCounter should had failed with invalid table."); - }catch (Throwable e){ + } catch (Throwable e) { assertTrue(e instanceof AssertionError); } } private void assertUsageContent(String usage) { - assertTrue(usage.contains("usage: hbase rowcounter " - + " [options] [ ...]")); + assertTrue(usage + .contains("usage: hbase rowcounter " + " [options] [ ...]")); assertTrue(usage.contains("Options:\n")); - assertTrue(usage.contains("--starttime= " - + "starting time filter to start counting rows from.\n")); + assertTrue(usage.contains( + "--starttime= " + "starting time filter to start counting rows from.\n")); assertTrue(usage.contains("--endtime= " - + "end time filter limit, to only count rows up to this timestamp.\n")); - assertTrue(usage.contains("--range= " - + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); + + "end time filter limit, to only count rows up to this timestamp.\n")); + assertTrue(usage + .contains("--range= " + "[startKey],[endKey][;[startKey],[endKey]...]]\n")); assertTrue(usage.contains("--expectedCount= expected number of rows to be count.\n")); - assertTrue(usage.contains("For performance, " - + "consider the following configuration properties:\n")); + assertTrue( + usage.contains("For performance, " + "consider the following configuration properties:\n")); assertTrue(usage.contains("-Dhbase.client.scanner.caching=100\n")); assertTrue(usage.contains("-Dmapreduce.map.speculative=false\n")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java index b28d8d9e89be..4bd7fc930910 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSimpleTotalOrderPartitioner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ /** * Test of simple partitioner. 
*/ -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestSimpleTotalOrderPartitioner { @ClassRule @@ -48,7 +48,7 @@ public class TestSimpleTotalOrderPartitioner { public void testSplit() throws Exception { String start = "a"; String end = "{"; - SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); + SimpleTotalOrderPartitioner p = new SimpleTotalOrderPartitioner<>(); this.conf.set(SimpleTotalOrderPartitioner.START, start); this.conf.set(SimpleTotalOrderPartitioner.END, end); @@ -69,14 +69,12 @@ public void testSplit() throws Exception { partition = p.getPartition(q, HConstants.EMPTY_BYTE_ARRAY, 3); assertEquals(2, partition); // What about end and start keys. - ImmutableBytesWritable startBytes = - new ImmutableBytesWritable(Bytes.toBytes(start)); + ImmutableBytesWritable startBytes = new ImmutableBytesWritable(Bytes.toBytes(start)); partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 2); assertEquals(0, partition); partition = p.getPartition(startBytes, HConstants.EMPTY_BYTE_ARRAY, 3); assertEquals(0, partition); - ImmutableBytesWritable endBytes = - new ImmutableBytesWritable(Bytes.toBytes("z")); + ImmutableBytesWritable endBytes = new ImmutableBytesWritable(Bytes.toBytes("z")); partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 2); assertEquals(1, partition); partition = p.getPartition(endBytes, HConstants.EMPTY_BYTE_ARRAY, 3); @@ -84,4 +82,3 @@ public void testSplit() throws Exception { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java index caacfc648559..ae8bc793b363 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestSyncTable.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals; import java.util.Arrays; - import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -80,9 +79,9 @@ public static void afterClass() throws Exception { } private static byte[][] generateSplits(int numRows, int numRegions) { - byte[][] splitRows = new byte[numRegions-1][]; + byte[][] splitRows = new byte[numRegions - 1][]; for (int i = 1; i < numRegions; i++) { - splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); + splitRows[i - 1] = Bytes.toBytes(numRows * i / numRegions); } return splitRows; } @@ -117,8 +116,8 @@ public void testSyncTableDoDeletesFalse() throws Exception { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doDeletes=false"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--doDeletes=false"); assertTargetDoDeletesFalse(100, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -140,8 +139,7 @@ public void testSyncTableDoPutsFalse() throws Exception { writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--doPuts=false"); + Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir, "--doPuts=false"); assertTargetDoPutsFalse(70, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -163,8 +161,8 @@ public void testSyncTableIgnoreTimestampsTrue() throws Exception { long current = EnvironmentEdgeManager.currentTime(); writeTestData(sourceTableName, targetTableName, current - 1000, current); hashSourceTable(sourceTableName, testDir, "--ignoreTimestamps=true"); - Counters syncCounters = syncTables(sourceTableName, targetTableName, - testDir, "--ignoreTimestamps=true"); + Counters syncCounters = + syncTables(sourceTableName, targetTableName, testDir, "--ignoreTimestamps=true"); assertEqualTables(90, sourceTableName, targetTableName, true); assertEquals(50, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); @@ -196,22 +194,18 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, + " cells:" + targetRow); if (sourceRow == null) { - Assert.fail("Expected " + expectedRows - + " source rows but only found " + i); + Assert.fail("Expected " + expectedRows + " source rows but only found " + i); } if (targetRow == null) { - Assert.fail("Expected " + expectedRows - + " target rows but only found " + i); + Assert.fail("Expected " + expectedRows + " target rows but only found " + i); } Cell[] sourceCells = sourceRow.rawCells(); Cell[] targetCells = targetRow.rawCells(); if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } for (int j = 0; j < sourceCells.length; j++) { Cell sourceCell 
= sourceCells[j]; @@ -240,13 +234,13 @@ private void assertEqualTables(int expectedRows, TableName sourceTableName, } Result sourceRow = sourceScanner.next(); if (sourceRow != null) { - Assert.fail("Source table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(sourceRow.getRow())); + Assert.fail("Source table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(sourceRow.getRow())); } Result targetRow = targetScanner.next(); if (targetRow != null) { - Assert.fail("Target table has more than " + expectedRows - + " rows. Next row: " + Bytes.toInt(targetRow.getRow())); + Assert.fail("Target table has more than " + expectedRows + " rows. Next row: " + + Bytes.toInt(targetRow.getRow())); } sourceScanner.close(); targetScanner.close(); @@ -266,18 +260,16 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN int rowsCount = 0; while (targetRow != null) { rowsCount++; - //only compares values for existing rows, skipping rows existing on - //target only that were not deleted given --doDeletes=false + // only compares values for existing rows, skipping rows existing on + // target only that were not deleted given --doDeletes=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { targetRow = targetScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? "null" - : Bytes.toInt(sourceRow.getRow())) + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? "null" - : Bytes.toInt(targetRow.getRow())) + LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) + " cells:" + targetRow); Cell[] sourceCells = sourceRow.rawCells(); @@ -287,18 +279,16 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + targetRowKey + " should have more cells in " - + "target than in source"); + Assert.fail( + "Row " + targetRowKey + " should have more cells in " + "target than in source"); } } else { if (sourceCells.length != targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " has " + sourceCells.length - + " cells in source table but " + targetCells.length - + " cells in target table"); + Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) + " has " + sourceCells.length + + " cells in source table but " + targetCells.length + " cells in target table"); } } for (int j = 0; j < sourceCells.length; j++) { @@ -314,7 +304,7 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN if (!CellUtil.matchingQualifier(sourceCell, targetCell)) { Assert.fail("Qualifiers don't match"); } - if (targetRowKey < 80 && targetRowKey >= 90){ + if (targetRowKey < 80 && targetRowKey >= 90) { if (!CellUtil.matchingTimestamp(sourceCell, targetCell)) { Assert.fail("Timestamps don't match"); } @@ -323,16 +313,14 @@ private void assertTargetDoDeletesFalse(int expectedRows, TableName sourceTableN Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug("Source cell: " + sourceCell + " target cell: " - + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); 
Throwables.propagate(t); } } targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); @@ -350,21 +338,17 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Result sourceRow = sourceScanner.next(); int rowsCount = 0; - while (targetRow!=null) { - //only compares values for existing rows, skipping rows existing on - //source only that were not added to target given --doPuts=false + while (targetRow != null) { + // only compares values for existing rows, skipping rows existing on + // source only that were not added to target given --doPuts=false if (Bytes.toInt(sourceRow.getRow()) != Bytes.toInt(targetRow.getRow())) { sourceRow = sourceScanner.next(); continue; } - LOG.debug("SOURCE row: " + (sourceRow == null ? - "null" : - Bytes.toInt(sourceRow.getRow())) + LOG.debug("SOURCE row: " + (sourceRow == null ? "null" : Bytes.toInt(sourceRow.getRow())) + " cells:" + sourceRow); - LOG.debug("TARGET row: " + (targetRow == null ? - "null" : - Bytes.toInt(targetRow.getRow())) + LOG.debug("TARGET row: " + (targetRow == null ? "null" : Bytes.toInt(targetRow.getRow())) + " cells:" + targetRow); LOG.debug("rowsCount: " + rowsCount); @@ -381,8 +365,8 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName if (sourceCells.length == targetCells.length) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); LOG.debug("Target cells: " + Arrays.toString(targetCells)); - Assert.fail("Row " + Bytes.toInt(sourceRow.getRow()) - + " shouldn't have same number of cells."); + Assert.fail( + "Row " + Bytes.toInt(sourceRow.getRow()) + " shouldn't have same number of cells."); } } else if (targetRowKey >= 80 && targetRowKey < 90) { LOG.debug("Source cells: " + Arrays.toString(sourceCells)); @@ -395,8 +379,7 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Cell targetCell = targetCells[j]; if (CellUtil.matchingValue(sourceCell, targetCell)) { Assert.fail("Cells values should not match for rows between " - + "90 and 100. Target row id: " + (Bytes.toInt(targetRow - .getRow()))); + + "90 and 100. Target row id: " + (Bytes.toInt(targetRow.getRow()))); } } } else { @@ -420,8 +403,7 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName Assert.fail("Values don't match"); } } catch (Throwable t) { - LOG.debug( - "Source cell: " + sourceCell + " target cell: " + targetCell); + LOG.debug("Source cell: " + sourceCell + " target cell: " + targetCell); Throwables.propagate(t); } } @@ -430,21 +412,20 @@ private void assertTargetDoPutsFalse(int expectedRows, TableName sourceTableName targetRow = targetScanner.next(); sourceRow = sourceScanner.next(); } - assertEquals("Target expected rows does not match.",expectedRows, - rowsCount); + assertEquals("Target expected rows does not match.", expectedRows, rowsCount); sourceScanner.close(); targetScanner.close(); sourceTable.close(); targetTable.close(); } - private Counters syncTables(TableName sourceTableName, TableName targetTableName, - Path testDir, String... options) throws Exception { + private Counters syncTables(TableName sourceTableName, TableName targetTableName, Path testDir, + String... 
options) throws Exception { SyncTable syncTable = new SyncTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+3); + String[] args = Arrays.copyOf(options, options.length + 3); args[options.length] = testDir.toString(); - args[options.length+1] = sourceTableName.getNameAsString(); - args[options.length+2] = targetTableName.getNameAsString(); + args[options.length + 1] = sourceTableName.getNameAsString(); + args[options.length + 2] = targetTableName.getNameAsString(); int code = syncTable.run(args); assertEquals("sync table job failed", 0, code); @@ -455,10 +436,10 @@ private Counters syncTables(TableName sourceTableName, TableName targetTableName private void hashSourceTable(TableName sourceTableName, Path testDir, String... options) throws Exception { int numHashFiles = 3; - long batchSize = 100; // should be 2 batches per region + long batchSize = 100; // should be 2 batches per region int scanBatch = 1; HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); - String[] args = Arrays.copyOf(options, options.length+5); + String[] args = Arrays.copyOf(options, options.length + 5); args[options.length] = "--batchsize=" + batchSize; args[options.length + 1] = "--numhashfiles=" + numHashFiles; args[options.length + 2] = "--scanbatch=" + scanBatch; @@ -492,14 +473,14 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, int targetRegions = 6; if (ArrayUtils.isEmpty(timestamps)) { long current = EnvironmentEdgeManager.currentTime(); - timestamps = new long[]{current,current}; + timestamps = new long[] { current, current }; } - Table sourceTable = TEST_UTIL.createTable(sourceTableName, - family, generateSplits(numRows, sourceRegions)); + Table sourceTable = + TEST_UTIL.createTable(sourceTableName, family, generateSplits(numRows, sourceRegions)); - Table targetTable = TEST_UTIL.createTable(targetTableName, - family, generateSplits(numRows, targetRegions)); + Table targetTable = + TEST_UTIL.createTable(targetTableName, family, generateSplits(numRows, targetRegions)); int rowIndex = 0; // a bunch of identical rows @@ -571,8 +552,8 @@ private void writeTestData(TableName sourceTableName, TableName targetTableName, sourceTable.put(sourcePut); Put targetPut = new Put(Bytes.toBytes(rowIndex)); - targetPut.addColumn(family, column1, timestamps[1]+1, column1); - targetPut.addColumn(family, column2, timestamps[1]-1, value2); + targetPut.addColumn(family, column1, timestamps[1] + 1, column1); + targetPut.addColumn(family, column2, timestamps[1] - 1, value2); targetTable.put(targetPut); } // some rows with different values diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java index a915d85d5ba6..4be17206590d 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -71,7 +71,6 @@ /** * This tests the TableInputFormat and its recovery semantics - * */ @Category(LargeTests.class) public class TestTableInputFormat { @@ -107,7 +106,6 @@ public void before() throws IOException { /** * Setup a table with two rows and values. 
- * * @param tableName * @return A Table instance for the created table. * @throws IOException @@ -118,7 +116,6 @@ public static Table createTable(byte[] tableName) throws IOException { /** * Setup a table with two rows and values per column family. - * * @param tableName * @return A Table instance for the created table. * @throws IOException @@ -140,15 +137,14 @@ public static Table createTable(byte[] tableName, byte[][] families) throws IOEx /** * Verify that the result and key have expected values. - * * @param r single row result * @param key the row key * @param expectedKey the expected key * @param expectedValue the expected value * @return true if the result contains the expected key and value, false otherwise. */ - static boolean checkResult(Result r, ImmutableBytesWritable key, - byte[] expectedKey, byte[] expectedValue) { + static boolean checkResult(Result r, ImmutableBytesWritable key, byte[] expectedKey, + byte[] expectedValue) { assertEquals(0, key.compareTo(expectedKey)); Map vals = r.getFamilyMap(FAMILY); byte[] value = vals.values().iterator().next(); @@ -157,15 +153,12 @@ static boolean checkResult(Result r, ImmutableBytesWritable key, } /** - * Create table data and run tests on specified htable using the - * o.a.h.hbase.mapreduce API. - * + * Create table data and run tests on specified htable using the o.a.h.hbase.mapreduce API. * @param table * @throws IOException * @throws InterruptedException */ - static void runTestMapreduce(Table table) throws IOException, - InterruptedException { + static void runTestMapreduce(Table table) throws IOException, InterruptedException { org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl trr = new org.apache.hadoop.hbase.mapreduce.TableRecordReaderImpl(); Scan s = new Scan(); @@ -198,11 +191,9 @@ static void runTestMapreduce(Table table) throws IOException, /** * Create a table that IOE's on first scanner next call - * * @throws IOException */ - static Table createIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -231,13 +222,10 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { } /** - * Create a table that throws a NotServingRegionException on first scanner - * next call - * + * Create a table that throws a NotServingRegionException on first scanner next call * @throws IOException */ - static Table createDNRIOEScannerTable(byte[] name, final int failCnt) - throws IOException { + static Table createDNRIOEScannerTable(byte[] name, final int failCnt) throws IOException { // build up a mock scanner stuff to fail the first time Answer a = new Answer() { int cnt = 0; @@ -252,8 +240,7 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { ResultScanner scanner = mock(ResultScanner.class); invocation.callRealMethod(); // simulate NotServingRegionException - doThrow( - new NotServingRegionException("Injected simulated TimeoutException")) + doThrow(new NotServingRegionException("Injected simulated TimeoutException")) .when(scanner).next(); return scanner; } @@ -270,46 +257,40 @@ public ResultScanner answer(InvocationOnMock invocation) throws Throwable { /** * Run test assuming no errors using newer mapreduce api - * * @throws IOException * @throws InterruptedException */ @Test - public void testTableRecordReaderMapreduce() throws IOException, - InterruptedException { + public void 
testTableRecordReaderMapreduce() throws IOException, InterruptedException { Table table = createTable("table1-mr".getBytes()); runTestMapreduce(table); } /** * Run test assuming Scanner IOException failure using newer mapreduce api - * * @throws IOException * @throws InterruptedException */ @Test - public void testTableRecordReaderScannerFailMapreduce() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduce() throws IOException, InterruptedException { Table htable = createIOEScannerTable("table2-mr".getBytes(), 1); runTestMapreduce(htable); } /** * Run test assuming Scanner IOException failure using newer mapreduce api - * * @throws IOException * @throws InterruptedException */ @Test(expected = IOException.class) - public void testTableRecordReaderScannerFailMapreduceTwice() throws IOException, - InterruptedException { + public void testTableRecordReaderScannerFailMapreduceTwice() + throws IOException, InterruptedException { Table htable = createIOEScannerTable("table3-mr".getBytes(), 2); runTestMapreduce(htable); } /** * Run test assuming NotServingRegionException using newer mapreduce api - * * @throws InterruptedException * @throws org.apache.hadoop.hbase.DoNotRetryIOException */ @@ -322,7 +303,6 @@ public void testTableRecordReaderScannerTimeoutMapreduce() /** * Run test assuming NotServingRegionException using newer mapreduce api - * * @throws InterruptedException * @throws org.apache.hadoop.hbase.NotServingRegionException */ @@ -348,8 +328,8 @@ public void testExtensionOfTableInputFormatBase() @Test public void testJobConfigurableExtensionOfTableInputFormatBase() throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using JobConfigurable."); + LOG.info( + "testing use of an InputFormat taht extends InputFormatBase, " + "using JobConfigurable."); final Table htable = createTable(Bytes.toBytes("exampleJobConfigurableTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleJobConfigurableTIF.class); @@ -358,8 +338,8 @@ public void testJobConfigurableExtensionOfTableInputFormatBase() @Test public void testDeprecatedExtensionOfTableInputFormatBase() throws IOException, InterruptedException, ClassNotFoundException { - LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + - "using the approach documented in 0.98."); + LOG.info("testing use of an InputFormat taht extends InputFormatBase, " + + "using the approach documented in 0.98."); final Table htable = createTable(Bytes.toBytes("exampleDeprecatedTable"), new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }); testInputFormat(ExampleDeprecatedTIF.class); @@ -392,17 +372,19 @@ void testInputFormat(Class clazz) public static class ExampleVerifier extends TableMapper { @Override - public void map(ImmutableBytesWritable key, Result value, Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException { for (Cell cell : value.listCells()) { - context.getCounter(TestTableInputFormat.class.getName() + ":row", - Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) + context + .getCounter(TestTableInputFormat.class.getName() + ":row", + Bytes.toString(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":family", - Bytes.toString(cell.getFamilyArray(), 
cell.getFamilyOffset(), cell.getFamilyLength())) + context + .getCounter(TestTableInputFormat.class.getName() + ":family", + Bytes.toString(cell.getFamilyArray(), cell.getFamilyOffset(), cell.getFamilyLength())) .increment(1l); - context.getCounter(TestTableInputFormat.class.getName() + ":value", - Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) + context + .getCounter(TestTableInputFormat.class.getName() + ":value", + Bytes.toString(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength())) .increment(1l); } } @@ -418,15 +400,14 @@ public void configure(JobConf job) { Table exampleTable = connection.getTable(TableName.valueOf(("exampleDeprecatedTable"))); // mandatory initializeTable(connection, exampleTable.getName()); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); } Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); scan.setFilter(exampleFilter); setScan(scan); } catch (IOException exception) { @@ -436,7 +417,6 @@ public void configure(JobConf job) { } - public static class ExampleJobConfigurableTIF extends TableInputFormatBase implements JobConfigurable { @@ -447,15 +427,14 @@ public void configure(JobConf job) { TableName tableName = TableName.valueOf("exampleJobConfigurableTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); } Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); scan.setFilter(exampleFilter); setScan(scan); } catch (IOException exception) { @@ -464,29 +443,26 @@ public void configure(JobConf job) { } } - public static class ExampleTIF extends TableInputFormatBase { @Override protected void initialize(JobContext job) throws IOException { - Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create( - job.getConfiguration())); + Connection connection = + ConnectionFactory.createConnection(HBaseConfiguration.create(job.getConfiguration())); TableName tableName = TableName.valueOf("exampleTable"); // mandatory initializeTable(connection, tableName); - byte[][] inputColumns = new byte [][] { Bytes.toBytes("columnA"), - Bytes.toBytes("columnB") }; - //optional + byte[][] inputColumns = new byte[][] { Bytes.toBytes("columnA"), Bytes.toBytes("columnB") }; + // optional Scan scan = new Scan(); for (byte[] family : inputColumns) { scan.addFamily(family); } Filter exampleFilter = - new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); + new RowFilter(CompareOperator.EQUAL, new RegexStringComparator("aa.*")); scan.setFilter(exampleFilter); setScan(scan); } } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java index ee46726d916b..731c080d5638 100644 --- 
a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,6 +22,7 @@ import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.IOException; import java.net.Inet6Address; import java.net.InetAddress; @@ -58,7 +59,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -@Category({SmallTests.class}) +@Category({ SmallTests.class }) public class TestTableInputFormatBase { @ClassRule @@ -89,13 +90,12 @@ public void testReuseRegionSizeCalculator() throws IOException { format.getSplits(context); // should only be 2 despite calling getSplits 4 times - Mockito.verify(format, Mockito.times(2)) - .createRegionSizeCalculator(Mockito.any(), Mockito.any()); + Mockito.verify(format, Mockito.times(2)).createRegionSizeCalculator(Mockito.any(), + Mockito.any()); } @Test - public void testTableInputFormatBaseReverseDNSForIPv6() - throws UnknownHostException { + public void testTableInputFormatBaseReverseDNSForIPv6() throws UnknownHostException { String address = "ipv6.google.com"; String localhost = null; InetAddress addr = null; @@ -107,11 +107,11 @@ public void testTableInputFormatBaseReverseDNSForIPv6() // google.com is down, we can probably forgive this test. return; } - System.out.println("Should retrun the hostname for this host " + - localhost + " addr : " + addr); + System.out.println("Should retrun the hostname for this host " + localhost + " addr : " + addr); String actualHostName = inputFormat.reverseDNS(addr); - assertEquals("Should retrun the hostname for this host. Expected : " + - localhost + " Actual : " + actualHostName, localhost, actualHostName); + assertEquals("Should retrun the hostname for this host. Expected : " + localhost + " Actual : " + + actualHostName, + localhost, actualHostName); } @Test @@ -119,7 +119,7 @@ public void testNonSuccessiveSplitsAreNotMerged() throws IOException { JobContext context = mock(JobContext.class); Configuration conf = HBaseConfiguration.create(); conf.set(ClusterConnection.HBASE_CLIENT_CONNECTION_IMPL, - ConnectionForMergeTesting.class.getName()); + ConnectionForMergeTesting.class.getName()); conf.set(TableInputFormat.INPUT_TABLE, "testTable"); conf.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true); when(context.getConfiguration()).thenReturn(conf); @@ -129,13 +129,13 @@ public void testNonSuccessiveSplitsAreNotMerged() throws IOException { // split["b", "c"] is excluded, split["o", "p"] and split["p", "q"] are merged, // but split["a", "b"] and split["c", "d"] are not merged. assertEquals(ConnectionForMergeTesting.START_KEYS.length - 1 - 1, - tifExclude.getSplits(context).size()); + tifExclude.getSplits(context).size()); } /** * Subclass of {@link TableInputFormat} to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class overrides {@link TableInputFormatBase#includeRegionInSplit} - * to exclude specific splits. + * This class overrides {@link TableInputFormatBase#includeRegionInSplit} to exclude specific + * splits. 
*/ private static class TableInputFormatForMergeTesting extends TableInputFormat { private byte[] prefixStartKey = Bytes.toBytes("b"); @@ -146,7 +146,7 @@ private static class TableInputFormatForMergeTesting extends TableInputFormat { * Exclude regions which contain rows starting with "b". */ @Override - protected boolean includeRegionInSplit(final byte[] startKey, final byte [] endKey) { + protected boolean includeRegionInSplit(final byte[] startKey, final byte[] endKey) { if (Bytes.compareTo(startKey, prefixEndKey) < 0 && (Bytes.compareTo(prefixStartKey, endKey) < 0 || Bytes.equals(endKey, HConstants.EMPTY_END_ROW))) { @@ -165,26 +165,23 @@ protected void initializeTable(Connection connection, TableName tableName) throw @Override protected RegionSizeCalculator createRegionSizeCalculator(RegionLocator locator, Admin admin) - throws IOException { + throws IOException { return sizeCalculator; } } /** - * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. - * This class returns mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, - * and {@link Admin}. + * Connection class to use in {@link #testNonSuccessiveSplitsAreNotMerged}. This class returns + * mocked {@link Table}, {@link RegionLocator}, {@link RegionSizeCalculator}, and {@link Admin}. */ private static class ConnectionForMergeTesting implements Connection { - public static final byte[][] SPLITS = new byte[][] { - Bytes.toBytes("a"), Bytes.toBytes("b"), Bytes.toBytes("c"), Bytes.toBytes("d"), - Bytes.toBytes("e"), Bytes.toBytes("f"), Bytes.toBytes("g"), Bytes.toBytes("h"), - Bytes.toBytes("i"), Bytes.toBytes("j"), Bytes.toBytes("k"), Bytes.toBytes("l"), - Bytes.toBytes("m"), Bytes.toBytes("n"), Bytes.toBytes("o"), Bytes.toBytes("p"), - Bytes.toBytes("q"), Bytes.toBytes("r"), Bytes.toBytes("s"), Bytes.toBytes("t"), - Bytes.toBytes("u"), Bytes.toBytes("v"), Bytes.toBytes("w"), Bytes.toBytes("x"), - Bytes.toBytes("y"), Bytes.toBytes("z") - }; + public static final byte[][] SPLITS = new byte[][] { Bytes.toBytes("a"), Bytes.toBytes("b"), + Bytes.toBytes("c"), Bytes.toBytes("d"), Bytes.toBytes("e"), Bytes.toBytes("f"), + Bytes.toBytes("g"), Bytes.toBytes("h"), Bytes.toBytes("i"), Bytes.toBytes("j"), + Bytes.toBytes("k"), Bytes.toBytes("l"), Bytes.toBytes("m"), Bytes.toBytes("n"), + Bytes.toBytes("o"), Bytes.toBytes("p"), Bytes.toBytes("q"), Bytes.toBytes("r"), + Bytes.toBytes("s"), Bytes.toBytes("t"), Bytes.toBytes("u"), Bytes.toBytes("v"), + Bytes.toBytes("w"), Bytes.toBytes("x"), Bytes.toBytes("y"), Bytes.toBytes("z") }; public static final byte[][] START_KEYS; public static final byte[][] END_KEYS; @@ -265,32 +262,31 @@ public RegionLocator getRegionLocator(TableName tableName) throws IOException { } RegionLocator locator = mock(RegionLocator.class); - when(locator.getRegionLocation(any(byte [].class), anyBoolean())). - thenAnswer(new Answer() { - @Override - public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] key = (byte [])args[0]; - return locationMap.get(key); - } - }); - when(locator.getStartEndKeys()). 
- thenReturn(new Pair(START_KEYS, END_KEYS)); + when(locator.getRegionLocation(any(byte[].class), anyBoolean())) + .thenAnswer(new Answer() { + @Override + public HRegionLocation answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] key = (byte[]) args[0]; + return locationMap.get(key); + } + }); + when(locator.getStartEndKeys()) + .thenReturn(new Pair(START_KEYS, END_KEYS)); return locator; } public RegionSizeCalculator getRegionSizeCalculator() { RegionSizeCalculator sizeCalculator = mock(RegionSizeCalculator.class); - when(sizeCalculator.getRegionSize(any(byte[].class))). - thenAnswer(new Answer() { - @Override - public Long answer(InvocationOnMock invocationOnMock) throws Throwable { - Object [] args = invocationOnMock.getArguments(); - byte [] regionId = (byte [])args[0]; - byte[] startKey = RegionInfo.getStartKey(regionId); - return SIZE_MAP.get(startKey); - } - }); + when(sizeCalculator.getRegionSize(any(byte[].class))).thenAnswer(new Answer() { + @Override + public Long answer(InvocationOnMock invocationOnMock) throws Throwable { + Object[] args = invocationOnMock.getArguments(); + byte[] regionId = (byte[]) args[0]; + byte[] startKey = RegionInfo.getStartKey(regionId); + return SIZE_MAP.get(startKey); + } + }); return sizeCalculator; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java index a116ecb72fa6..2129b42b53ec 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScan.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScan extends TestTableInputFormatScanBase { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScan.class); + HBaseClassTestRule.forClass(TestTableInputFormatScan.class); /** * Tests a MR scan using specific number of mappers. The test table has 26 regions, diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java index 1e5fb8190790..8a20b9cea384 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -50,7 +50,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * Tests various scan start and stop row scenarios. This is set in a scan and tested in a MapReduce * job to see if that is handed over and done properly too. 
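The rewritten stubbing above keeps the same Mockito idea: answer RegionLocator lookups from an in-memory map instead of a live cluster. A minimal sketch of that pattern, using the lambda form of Answer; the map contents and helper names are made up:

    import static org.mockito.ArgumentMatchers.any;
    import static org.mockito.ArgumentMatchers.anyBoolean;
    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import java.io.IOException;
    import java.util.Map;
    import java.util.TreeMap;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocatorMockSketch {
      // hypothetical row -> location table; a real test fills this from its split keys
      static final Map<byte[], HRegionLocation> LOCATIONS = new TreeMap<>(Bytes.BYTES_COMPARATOR);

      static RegionLocator mockLocator() throws IOException {
        RegionLocator locator = mock(RegionLocator.class);
        // every lookup is answered from the map, so no cluster is needed
        when(locator.getRegionLocation(any(byte[].class), anyBoolean()))
          .thenAnswer(inv -> LOCATIONS.get((byte[]) inv.getArgument(0)));
        return locator;
      }
    }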
@@ -61,7 +60,7 @@ public abstract class TestTableInputFormatScanBase { static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(); static final TableName TABLE_NAME = TableName.valueOf("scantest"); - static final byte[][] INPUT_FAMILYS = {Bytes.toBytes("content1"), Bytes.toBytes("content2")}; + static final byte[][] INPUT_FAMILYS = { Bytes.toBytes("content1"), Bytes.toBytes("content2") }; static final String KEY_STARTROW = "startRow"; static final String KEY_LASTROW = "stpRow"; @@ -85,35 +84,32 @@ public static void tearDownAfterClass() throws Exception { * Pass the key and value to reduce. */ public static class ScanMapper - extends TableMapper { + extends TableMapper { /** * Pass the key and value to reduce. - * - * @param key The key, here "aaa", "aab" etc. - * @param value The value is the same as the key. - * @param context The task context. + * @param key The key, here "aaa", "aab" etc. + * @param value The value is the same as the key. + * @param context The task context. * @throws IOException When reading the rows fails. */ @Override - public void map(ImmutableBytesWritable key, Result value, - Context context) + public void map(ImmutableBytesWritable key, Result value, Context context) throws IOException, InterruptedException { if (value.size() != 2) { throw new IOException("There should be two input columns"); } - Map>> - cfMap = value.getMap(); + Map>> cfMap = value.getMap(); if (!cfMap.containsKey(INPUT_FAMILYS[0]) || !cfMap.containsKey(INPUT_FAMILYS[1])) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILYS[0]) + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); + throw new IOException("Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILYS[0]) + + "' or '" + Bytes.toString(INPUT_FAMILYS[1]) + "'."); } String val0 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[0], null)); String val1 = Bytes.toStringBinary(value.getValue(INPUT_FAMILYS[1], null)); - LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + - ", value -> (" + val0 + ", " + val1 + ")"); + LOG.info("map: key -> " + Bytes.toStringBinary(key.get()) + ", value -> (" + val0 + ", " + + val1 + ")"); context.write(key, key); } } @@ -122,28 +118,25 @@ public void map(ImmutableBytesWritable key, Result value, * Checks the last and first key seen against the scanner boundaries. 
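The ScanMapper hunks that follow are purely reformatting; the mapper's job is to verify that both configured families arrived and to emit the row key for the boundary-checking reducer. A stripped-down sketch under that reading, with the family names taken from INPUT_FAMILYS:

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TwoFamilyCheckMapper
        extends TableMapper<ImmutableBytesWritable, ImmutableBytesWritable> {
      private static final byte[] FAM1 = Bytes.toBytes("content1");
      private static final byte[] FAM2 = Bytes.toBytes("content2");

      @Override
      protected void map(ImmutableBytesWritable key, Result value, Context context)
          throws IOException, InterruptedException {
        // fail fast if the scan did not hand over both configured families
        if (value.getValue(FAM1, null) == null || value.getValue(FAM2, null) == null) {
          throw new IOException("Row " + Bytes.toStringBinary(key.get()) + " is missing a family");
        }
        // emit the row key so the reducer can compare first/last keys against the scan range
        context.write(key, key);
      }
    }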
*/ public static class ScanReducer - extends Reducer { + extends Reducer { private String first = null; private String last = null; - protected void reduce(ImmutableBytesWritable key, - Iterable values, Context context) - throws IOException ,InterruptedException { + protected void reduce(ImmutableBytesWritable key, Iterable values, + Context context) throws IOException, InterruptedException { int count = 0; for (ImmutableBytesWritable value : values) { String val = Bytes.toStringBinary(value.get()); - LOG.info("reduce: key[" + count + "] -> " + - Bytes.toStringBinary(key.get()) + ", value -> " + val); + LOG.info( + "reduce: key[" + count + "] -> " + Bytes.toStringBinary(key.get()) + ", value -> " + val); if (first == null) first = val; last = val; count++; } } - protected void cleanup(Context context) - throws IOException, InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { Configuration c = context.getConfiguration(); String startRow = c.get(KEY_STARTROW); String lastRow = c.get(KEY_LASTROW); @@ -164,8 +157,8 @@ protected void cleanup(Context context) */ protected void testScanFromConfiguration(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + - "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + String jobName = "ScanFromConfig" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + + "To" + (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); c.set(TableInputFormat.INPUT_TABLE, TABLE_NAME.getNameAsString()); c.set(TableInputFormat.SCAN_COLUMN_FAMILY, @@ -198,8 +191,8 @@ protected void testScanFromConfiguration(String start, String stop, String last) */ protected void testScan(String start, String stop, String last) throws IOException, InterruptedException, ClassNotFoundException { - String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + - (stop != null ? stop.toUpperCase(Locale.ROOT) : "Empty"); + String jobName = "Scan" + (start != null ? start.toUpperCase(Locale.ROOT) : "Empty") + "To" + + (stop != null ? 
stop.toUpperCase(Locale.ROOT) : "Empty"); LOG.info("Before map/reduce startup - job " + jobName); Configuration c = new Configuration(TEST_UTIL.getConfiguration()); Scan scan = new Scan(); @@ -225,7 +218,6 @@ protected void testScan(String start, String stop, String last) LOG.info("After map/reduce completion - job " + jobName); } - /** * Tests Number of inputSplits for MR job when specify number of mappers for TableInputFormatXXX * This test does not run MR job @@ -294,7 +286,7 @@ protected void testAutobalanceNumOfSplit() throws IOException { int[] regionLen = { 10, 20, 20, 40, 60 }; for (int i = 0; i < 5; i++) { InputSplit split = new TableSplit(TABLE_NAME, new Scan(), Bytes.toBytes(i), - Bytes.toBytes(i + 1), "", "", regionLen[i] * 1048576); + Bytes.toBytes(i + 1), "", "", regionLen[i] * 1048576); splits.add(split); } TableInputFormat tif = new TableInputFormat(); @@ -311,4 +303,3 @@ protected void testAutobalanceNumOfSplit() throws IOException { assertNotEquals("The seventh split start key should not be", 4, Bytes.toInt(ts4.getStartRow())); } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java index d7cefd61b148..0653e7e109d8 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToAPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToAPP extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToAPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToAPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java index 598a345834d8..d1f42f256255 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBA.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToBBA extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBA.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBA.class); /** * Tests a MR scan using specific start and stop rows. 
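Besides building a Scan in code, these tests also drive TableInputFormat purely through Configuration keys (testScanFromConfiguration) and, earlier in TestTableInputFormatBase, switch on split auto-balancing. A small sketch of that configuration-only route; the table, family and row keys are placeholders:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableInputFormat;
    import org.apache.hadoop.hbase.mapreduce.TableInputFormatBase;

    public class ScanConfigSketch {
      public static Configuration build() {
        Configuration c = HBaseConfiguration.create();
        c.set(TableInputFormat.INPUT_TABLE, "scantest");        // table to read
        c.set(TableInputFormat.SCAN_COLUMN_FAMILY, "content1"); // limit the scan to one family
        c.set(TableInputFormat.SCAN_ROW_START, "aaa");          // optional start row
        c.set(TableInputFormat.SCAN_ROW_STOP, "opp");           // optional stop row (exclusive)
        // let the input format merge small neighbouring splits and cut up oversized ones
        c.setBoolean(TableInputFormatBase.MAPREDUCE_INPUT_AUTOBALANCE, true);
        return c;
      }
    }

The auto-balance flag is what testAutobalanceNumOfSplit and testNonSuccessiveSplitsAreNotMerged exercise: only adjacent, under-sized splits may be merged, so region ordering is preserved.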
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java index 6d3674caad86..22cfb2cd5c62 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToBBB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToBBB extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBB.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToBBB.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java index f5d4de10a88a..2e62e4d9c7c6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToEmpty extends TestTableInputFormatSc @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java index 939fc936f955..45fc2208e22c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanEmptyToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanEmptyToOPP extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToOPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanEmptyToOPP.class); /** * Tests a MR scan using specific start and stop rows. 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java index 32f768c00fb8..0126ef3b82c9 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToOPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanOBBToOPP extends TestTableInputFormatScanBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToOPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToOPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java index 5ecb4e60f4e0..5de7e14ced43 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOBBToQPP.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanOBBToQPP extends TestTableInputFormatScanBa @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToQPP.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanOBBToQPP.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java index 7b2ccded7e19..5874d893b453 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanOPPToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanOPPToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanOPPToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanOPPToEmpty.class); /** * Tests a MR scan using specific start and stop rows. 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java index 2801f4eb8bf7..b1dc43efed38 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYXToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanYYXToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYYXToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanYYXToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java index 97a4998e5537..49a7f4fed407 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYYYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanYYYToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYYYToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanYYYToEmpty.class); /** * Tests a MR scan using specific start and stop rows. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java index 3d91ff2b7b3c..524ea567ba3b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableInputFormatScanYZYToEmpty.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ public class TestTableInputFormatScanYZYToEmpty extends TestTableInputFormatScan @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestTableInputFormatScanYZYToEmpty.class); + HBaseClassTestRule.forClass(TestTableInputFormatScanYZYToEmpty.class); /** * Tests a MR scan using specific start and stop rows. 
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java index 786da1a02049..8e69b39f60bf 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -51,12 +51,12 @@ import org.slf4j.LoggerFactory; /** - * Test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of - * a particular cell, and write it back to the table. + * Test Map/Reduce job over HBase tables. The map/reduce process we're testing on our tables is + * simple - take every row in the table, reverse the value of a particular cell, and write it back + * to the table. */ -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableMapReduce extends TestTableMapReduceBase { @ClassRule @@ -66,7 +66,9 @@ public class TestTableMapReduce extends TestTableMapReduceBase { private static final Logger LOG = LoggerFactory.getLogger(TestTableMapReduce.class); @Override - protected Logger getLog() { return LOG; } + protected Logger getLog() { + return LOG; + } /** * Pass the given key and processed record reduce @@ -75,24 +77,21 @@ static class ProcessContentsMapper extends TableMapper>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. 
Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -115,12 +114,9 @@ protected void runTestOnTable(Table table) throws IOException { job.setNumReduceTasks(1); Scan scan = new Scan(); scan.addFamily(INPUT_FAMILY); - TableMapReduceUtil.initTableMapperJob( - table.getName().getNameAsString(), scan, - ProcessContentsMapper.class, ImmutableBytesWritable.class, - Put.class, job); - TableMapReduceUtil.initTableReducerJob( - table.getName().getNameAsString(), + TableMapReduceUtil.initTableMapperJob(table.getName().getNameAsString(), scan, + ProcessContentsMapper.class, ImmutableBytesWritable.class, Put.class, job); + TableMapReduceUtil.initTableReducerJob(table.getName().getNameAsString(), IdentityTableReducer.class, job); FileOutputFormat.setOutputPath(job, new Path("test")); LOG.info("Started " + table.getName().getNameAsString()); @@ -138,8 +134,7 @@ protected void runTestOnTable(Table table) throws IOException { } finally { table.close(); if (job != null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -151,8 +146,8 @@ protected void runTestOnTable(Table table) throws IOException { */ private void verifyJobCountersAreEmitted(Job job) throws IOException { Counters counters = job.getCounters(); - Counter counter - = counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); + Counter counter = + counters.findCounter(TableRecordReaderImpl.HBASE_COUNTER_GROUP_NAME, "RPC_CALLS"); assertNotNull("Unable to find Job counter for HBase scan metrics, RPC_CALLS", counter); assertTrue("Counter value for RPC_CALLS should be larger than 0", counter.getValue() > 0); } @@ -161,7 +156,7 @@ private void verifyJobCountersAreEmitted(Job job) throws IOException { public void testWritingToDisabledTable() throws IOException { try (Admin admin = UTIL.getConnection().getAdmin(); - Table table = UTIL.getConnection().getTable(TABLE_FOR_NEGATIVE_TESTS)) { + Table table = UTIL.getConnection().getTable(TABLE_FOR_NEGATIVE_TESTS)) { admin.disableTable(table.getName()); runTestOnTable(table); fail("Should not have reached here, should have thrown an exception"); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java index f86f20d6412a..2b69ff8e5c6c 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceBase.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,9 +43,9 @@ import org.slf4j.Logger; /** - * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing - * on our tables is simple - take every row in the table, reverse the value of a particular cell, - * and write it back to the table. Implements common components between mapred and mapreduce + * A base class for a test Map/Reduce job over HBase tables. The map/reduce process we're testing on + * our tables is simple - take every row in the table, reverse the value of a particular cell, and + * write it back to the table. Implements common components between mapred and mapreduce * implementations. 
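The runTestOnTable hunks above are only re-wrapped; the job wiring itself stays the usual TableMapReduceUtil pair of calls. A minimal sketch of that wiring, with a placeholder table name and the mapper class left open as a parameter:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.IdentityTableReducer;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.mapreduce.Job;

    public class ReverseJobSketch {
      public static Job build(Configuration conf, Class<? extends TableMapper> mapperClass)
          throws IOException {
        Job job = Job.getInstance(conf, "reverse-cell-values");
        Scan scan = new Scan();
        scan.addFamily(Bytes.toBytes("contents"));
        // mapper reads rows of "mytable" and emits (row key, Put) pairs
        TableMapReduceUtil.initTableMapperJob("mytable", scan, mapperClass,
          ImmutableBytesWritable.class, Put.class, job);
        // identity reducer writes the emitted Puts straight back to the same table
        TableMapReduceUtil.initTableReducerJob("mytable", IdentityTableReducer.class, job);
        job.setNumReduceTasks(1);
        return job;
      }
    }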
*/ public abstract class TestTableMapReduceBase { @@ -56,10 +55,7 @@ public abstract class TestTableMapReduceBase { protected static final byte[] INPUT_FAMILY = Bytes.toBytes("contents"); protected static final byte[] OUTPUT_FAMILY = Bytes.toBytes("text"); - protected static final byte[][] columns = new byte[][] { - INPUT_FAMILY, - OUTPUT_FAMILY - }; + protected static final byte[][] columns = new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }; /** * Retrieve my logger instance. @@ -74,9 +70,8 @@ public abstract class TestTableMapReduceBase { @BeforeClass public static void beforeClass() throws Exception { UTIL.startMiniCluster(); - Table table = - UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, new byte[][] { INPUT_FAMILY, - OUTPUT_FAMILY }); + Table table = UTIL.createMultiRegionTable(MULTI_REGION_TABLE_NAME, + new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); UTIL.loadTable(table, INPUT_FAMILY, false); UTIL.createTable(TABLE_FOR_NEGATIVE_TESTS, new byte[][] { INPUT_FAMILY, OUTPUT_FAMILY }); } @@ -111,11 +106,10 @@ protected static Put map(ImmutableBytesWritable key, Result value) throws IOExce if (value.size() != 1) { throw new IOException("There should only be one input column"); } - Map>> - cf = value.getMap(); - if(!cf.containsKey(INPUT_FAMILY)) { - throw new IOException("Wrong input columns. Missing: '" + - Bytes.toString(INPUT_FAMILY) + "'."); + Map>> cf = value.getMap(); + if (!cf.containsKey(INPUT_FAMILY)) { + throw new IOException( + "Wrong input columns. Missing: '" + Bytes.toString(INPUT_FAMILY) + "'."); } // Get the original value and reverse it @@ -157,8 +151,8 @@ protected void verify(TableName tableName) throws IOException { } /** - * Looks at every value of the mapreduce output and verifies that indeed - * the values have been reversed. + * Looks at every value of the mapreduce output and verifies that indeed the values have been + * reversed. * @param table Table to scan. 
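The beforeClass fixture above boils down to a mini cluster plus a pre-split, pre-loaded table. A rough sketch of that setup using the same HBaseTestingUtility helpers; the table name here is a placeholder:

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FixtureSketch {
      private static final HBaseTestingUtility UTIL = new HBaseTestingUtility();

      public static void setUp() throws Exception {
        UTIL.startMiniCluster();
        // pre-split into multiple regions, with the two families the mappers expect
        Table table = UTIL.createMultiRegionTable(TableName.valueOf("mrtest"),
          new byte[][] { Bytes.toBytes("contents"), Bytes.toBytes("text") });
        // fills the "contents" family with rows whose value mirrors the row key
        UTIL.loadTable(table, Bytes.toBytes("contents"), false);
      }

      public static void tearDown() throws Exception {
        UTIL.shutdownMiniCluster();
      }
    }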
* @throws IOException * @throws NullPointerException if we failed to find a cell value @@ -170,18 +164,17 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc try { Iterator itr = scanner.iterator(); assertTrue(itr.hasNext()); - while(itr.hasNext()) { + while (itr.hasNext()) { Result r = itr.next(); if (getLog().isDebugEnabled()) { - if (r.size() > 2 ) { - throw new IOException("Too many results, expected 2 got " + - r.size()); + if (r.size() > 2) { + throw new IOException("Too many results, expected 2 got " + r.size()); } } byte[] firstValue = null; byte[] secondValue = null; int count = 0; - for(Cell kv : r.listCells()) { + for (Cell kv : r.listCells()) { if (count == 0) { firstValue = CellUtil.cloneValue(kv); } @@ -194,16 +187,13 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc } } - if (firstValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": first value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": first value is null"); } String first = Bytes.toString(firstValue); if (secondValue == null) { - throw new NullPointerException(Bytes.toString(r.getRow()) + - ": second value is null"); + throw new NullPointerException(Bytes.toString(r.getRow()) + ": second value is null"); } byte[] secondReversed = new byte[secondValue.length]; for (int i = 0, j = secondValue.length - 1; j >= 0; j--, i++) { @@ -213,9 +203,9 @@ private void verifyAttempt(final Table table) throws IOException, NullPointerExc if (first.compareTo(second) != 0) { if (getLog().isDebugEnabled()) { - getLog().debug("second key is not the reverse of first. row=" + - Bytes.toStringBinary(r.getRow()) + ", first value=" + first + - ", second value=" + second); + getLog().debug( + "second key is not the reverse of first. row=" + Bytes.toStringBinary(r.getRow()) + + ", first value=" + first + ", second value=" + second); } fail(); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java index e329e912c3a0..eecffb0b7327 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableMapReduceUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; + import java.io.Closeable; import java.io.File; import java.util.Collection; @@ -58,7 +59,7 @@ /** * Test different variants of initTableMapperJob method */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestTableMapReduceUtil { private static final String HTTP_PRINCIPAL = "HTTP/localhost"; @@ -72,8 +73,8 @@ public void after() { } /* - * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because - * the method depends on an online cluster. + * initTableSnapshotMapperJob is tested in {@link TestTableSnapshotInputFormat} because the method + * depends on an online cluster. 
*/ @Test @@ -81,9 +82,8 @@ public void testInitTableMapperJob1() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); // test - TableMapReduceUtil.initTableMapperJob( - "Table", new Scan(), Import.Importer.class, Text.class, Text.class, job, - false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob("Table", new Scan(), Import.Importer.class, Text.class, + Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -96,9 +96,8 @@ public void testInitTableMapperJob1() throws Exception { public void testInitTableMapperJob2() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false, WALInputFormat.class); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -111,9 +110,8 @@ public void testInitTableMapperJob2() throws Exception { public void testInitTableMapperJob3() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -126,9 +124,8 @@ public void testInitTableMapperJob3() throws Exception { public void testInitTableMapperJob4() throws Exception { Configuration configuration = new Configuration(); Job job = Job.getInstance(configuration, "tableName"); - TableMapReduceUtil.initTableMapperJob( - Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, - Text.class, job, false); + TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, + Text.class, Text.class, job, false); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); @@ -137,8 +134,8 @@ public void testInitTableMapperJob4() throws Exception { assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); } - private static Closeable startSecureMiniCluster( - HBaseTestingUtility util, MiniKdc kdc, String principal) throws Exception { + private static Closeable startSecureMiniCluster(HBaseTestingUtility util, MiniKdc kdc, + String principal) throws Exception { Configuration conf = util.getConfiguration(); SecureTestUtil.enableSecurity(conf); @@ -148,8 +145,8 @@ private static Closeable startSecureMiniCluster( conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - 
principal + '@' + kdc.getRealm(), HTTP_PRINCIPAL + '@' + kdc.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, principal + '@' + kdc.getRealm(), + HTTP_PRINCIPAL + '@' + kdc.getRealm()); KerberosName.resetDefaultRealm(); @@ -191,8 +188,7 @@ public void testInitCredentialsForCluster1() throws Exception { @Test @SuppressWarnings("unchecked") - public void testInitCredentialsForCluster2() - throws Exception { + public void testInitCredentialsForCluster2() throws Exception { HBaseTestingUtility util1 = new HBaseTestingUtility(); HBaseTestingUtility util2 = new HBaseTestingUtility(); @@ -204,7 +200,7 @@ public void testInitCredentialsForCluster2() loginUserFromKeytab(userPrincipal + '@' + kdc.getRealm(), keytab.getAbsolutePath()); try (Closeable util1Closeable = startSecureMiniCluster(util1, kdc, userPrincipal); - Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) { + Closeable util2Closeable = startSecureMiniCluster(util2, kdc, userPrincipal)) { try { Configuration conf1 = util1.getConfiguration(); Job job = Job.getInstance(conf1); @@ -217,7 +213,7 @@ public void testInitCredentialsForCluster2() String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher()); Token tokenForCluster = - (Token) credentials.getToken(new Text(clusterId)); + (Token) credentials.getToken(new Text(clusterId)); assertEquals(userPrincipal + '@' + kdc.getRealm(), tokenForCluster.decodeIdentifier().getUsername()); } finally { @@ -260,8 +256,7 @@ public void testInitCredentialsForCluster3() throws Exception { @Test @SuppressWarnings("unchecked") - public void testInitCredentialsForCluster4() - throws Exception { + public void testInitCredentialsForCluster4() throws Exception { HBaseTestingUtility util1 = new HBaseTestingUtility(); // Assume util1 is insecure cluster // Do not start util1 because cannot boot secured mini cluster and insecure mini cluster at once @@ -287,7 +282,7 @@ public void testInitCredentialsForCluster4() String clusterId = ZKClusterId.readClusterIdZNode(util2.getZooKeeperWatcher()); Token tokenForCluster = - (Token) credentials.getToken(new Text(clusterId)); + (Token) credentials.getToken(new Text(clusterId)); assertEquals(userPrincipal + '@' + kdc.getRealm(), tokenForCluster.decodeIdentifier().getUsername()); } finally { diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java index 88e40a75c53c..3d6cd387a917 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableRecordReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
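The initCredentialsForCluster tests above all funnel through a single call: a job submitted against one cluster can carry a delegation token for a second, secure cluster. A minimal sketch of that call; only the wrapper class and method names are made up:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class CredentialsSketch {
      /** Fetch a delegation token from the peer cluster and attach it to the job's credentials. */
      public static void addPeerClusterToken(Job job, Configuration peerClusterConf)
          throws IOException {
        // map tasks can then authenticate to the peer cluster without their own Kerberos ticket
        TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
      }
    }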
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.HBaseTestingUtility; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java index 188fc1f70706..6d885b330fd6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSnapshotInputFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,11 +17,11 @@ */ package org.apache.hadoop.hbase.mapreduce; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; +import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_ROW_LIMIT_PER_INPUTSPLIT; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION; -import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT; import static org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl.SNAPSHOT_INPUTFORMAT_SCANNER_READTYPE; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; @@ -70,7 +70,7 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Lists; -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestTableSnapshotInputFormat extends TableSnapshotInputFormatTestBase { @ClassRule @@ -97,7 +97,6 @@ protected byte[] getEndRow() { return yyy; } - @Test public void testGetBestLocations() throws IOException { TableSnapshotInputFormatImpl tsif = new TableSnapshotInputFormatImpl(); @@ -107,36 +106,36 @@ public void testGetBestLocations() throws IOException { Assert.assertEquals(null, TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), 
TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); blockDistribution = new HDFSBlocksDistribution(); - blockDistribution.addHostsAndBlockWeight(new String[] {"h1"}, 10); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 7); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 5); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 1); + blockDistribution.addHostsAndBlockWeight(new String[] { "h1" }, 10); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 7); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 5); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 1); Assert.assertEquals(Lists.newArrayList("h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 2); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 2); Assert.assertEquals(Lists.newArrayList("h1", "h2"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h2"}, 3); + blockDistribution.addHostsAndBlockWeight(new String[] { "h2" }, 3); Assert.assertEquals(Lists.newArrayList("h2", "h1"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); - blockDistribution.addHostsAndBlockWeight(new String[] {"h3"}, 6); - blockDistribution.addHostsAndBlockWeight(new String[] {"h4"}, 9); + blockDistribution.addHostsAndBlockWeight(new String[] { "h3" }, 6); + blockDistribution.addHostsAndBlockWeight(new String[] { "h4" }, 9); Assert.assertEquals(Lists.newArrayList("h2", "h3", "h4"), TableSnapshotInputFormatImpl.getBestLocations(conf, blockDistribution)); @@ -147,10 +146,10 @@ public static enum TestTableSnapshotCounters { } public static class TestTableSnapshotMapper - extends TableMapper { + extends TableMapper { @Override - protected void map(ImmutableBytesWritable key, Result value, - Context context) throws IOException, InterruptedException { + protected void map(ImmutableBytesWritable key, Result value, Context context) + throws IOException, InterruptedException { // Validate a single row coming from the snapshot, and emit the row key verifyRowFromMap(key, value); context.write(key, NullWritable.get()); @@ -158,18 +157,18 @@ protected void map(ImmutableBytesWritable key, Result value, } public static class TestTableSnapshotReducer - extends Reducer { + extends Reducer { HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(bbb, yyy); + @Override protected void reduce(ImmutableBytesWritable key, Iterable values, - Context context) throws IOException, InterruptedException { + Context context) throws IOException, InterruptedException { rowTracker.addRow(key.get()); } @Override - protected void cleanup(Context context) throws IOException, - InterruptedException { + protected void cleanup(Context context) throws IOException, InterruptedException { rowTracker.validate(); } } @@ -184,19 +183,17 @@ public void testInitTableSnapshotMapperJobConfig() throws Exception { Job job = new Job(UTIL.getConfiguration()); Path tmpTableDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, 
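The getBestLocations assertions above rely on HDFSBlocksDistribution accumulating block weight per host; the result is ordered by weight, and hosts holding only a negligible share are pruned. A compact sketch of that flow, with host names and weights mirroring the test:

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HDFSBlocksDistribution;
    import org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormatImpl;

    public class BestLocationsSketch {
      public static List<String> sketch() {
        Configuration conf = HBaseConfiguration.create();
        HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
        // weights accumulate per host across calls
        dist.addHostsAndBlockWeight(new String[] { "h1" }, 10);
        dist.addHostsAndBlockWeight(new String[] { "h2" }, 7);
        dist.addHostsAndBlockWeight(new String[] { "h3" }, 5);
        dist.addHostsAndBlockWeight(new String[] { "h4" }, 1);
        // highest-weight host first; with these exact weights the test expects just ["h1"]
        return TableSnapshotInputFormatImpl.getBestLocations(conf, dist);
      }
    }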
false, + tmpTableDir); // TODO: would be better to examine directly the cache instance that results from this // config. Currently this is not possible because BlockCache initialization is static. - Assert.assertEquals( - "Snapshot job should be configured for default LruBlockCache.", + Assert.assertEquals("Snapshot job should be configured for default LruBlockCache.", HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT, job.getConfiguration().getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, -1), 0.01); - Assert.assertEquals( - "Snapshot job should not use BucketCache.", - 0, job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); + Assert.assertEquals("Snapshot job should not use BucketCache.", 0, + job.getConfiguration().getFloat("hbase.bucketcache.size", -1), 0.01); } finally { UTIL.getAdmin().deleteSnapshot(snapshotName); UTIL.deleteTable(tableName); @@ -208,8 +205,7 @@ public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Excepti Configuration conf = UTIL.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, true); try { - testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, - true); + testWithMockedMapReduce(UTIL, name.getMethodName() + "Snapshot", 1, 1, 1, true); } finally { conf.unset(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION); } @@ -219,19 +215,18 @@ public void testWithMockedMapReduceSingleRegionByRegionLocation() throws Excepti public void testRestoreSnapshotDoesNotCreateBackRefLinksInit(TableName tableName, String snapshotName, Path tmpTableDir) throws Exception { Job job = new Job(UTIL.getConfiguration()); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + tmpTableDir); } @Override - public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, - int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) + public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName, int numRegions, + int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo) throws Exception { final TableName tableName = TableName.valueOf(name.getMethodName()); try { - createTableAndSnapshot( - util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); + createTableAndSnapshot(util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions); Configuration conf = util.getConfiguration(); conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, setLocalityEnabledTo); @@ -242,14 +237,13 @@ public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotNam Scan scan = new Scan(getStartRow(), getEndRow()); // limit the scan if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, 
TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, tmpTableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + false, tmpTableDir); } verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow()); @@ -418,7 +412,7 @@ public void testScannerReadTypeConfiguration() throws IOException { Scan scanWithReadType = new Scan(); scanWithReadType.setReadType(readType); assertEquals(scanWithReadType.getReadType(), - serializeAndReturn(conf, scanWithReadType).getReadType()); + serializeAndReturn(conf, scanWithReadType).getReadType()); } // We should only see the DEFAULT ReadType getting updated to STREAM. Scan scanWithoutReadType = new Scan(); @@ -432,8 +426,8 @@ public void testScannerReadTypeConfiguration() throws IOException { } /** - * Serializes and deserializes the given scan in the same manner that - * TableSnapshotInputFormat does. + * Serializes and deserializes the given scan in the same manner that TableSnapshotInputFormat + * does. */ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { conf.set(TableInputFormat.SCAN, TableMapReduceUtil.convertScanToString(s)); @@ -441,8 +435,7 @@ private Scan serializeAndReturn(Configuration conf, Scan s) throws IOException { } private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumSplits, - byte[] startRow, byte[] stopRow) - throws IOException, InterruptedException { + byte[] startRow, byte[] stopRow) throws IOException, InterruptedException { TableSnapshotInputFormat tsif = new TableSnapshotInputFormat(); List splits = tsif.getSplits(job); @@ -451,13 +444,12 @@ private void verifyWithMockedMapReduce(Job job, int numRegions, int expectedNumS HBaseTestingUtility.SeenRowTracker rowTracker = new HBaseTestingUtility.SeenRowTracker(startRow, stopRow.length > 0 ? 
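The two branches above choose between one split per region and N splits per region when reading a snapshot. For readability, here are the same two TableMapReduceUtil calls in isolation; the snapshot name and restore dir are placeholders and the mapper class is left open:

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.hbase.mapreduce.TableMapper;
    import org.apache.hadoop.hbase.util.RegionSplitter;
    import org.apache.hadoop.io.NullWritable;
    import org.apache.hadoop.mapreduce.Job;

    public class SnapshotJobSketch {
      public static void configure(Job job, Class<? extends TableMapper> mapper, Path restoreDir,
          int splitsPerRegion) throws IOException {
        Scan scan = new Scan();
        if (splitsPerRegion > 1) {
          // fan out: each region contributes splitsPerRegion splits, cut by UniformSplit
          TableMapReduceUtil.initTableSnapshotMapperJob("mySnapshot", scan, mapper,
            ImmutableBytesWritable.class, NullWritable.class, job, false, restoreDir,
            new RegionSplitter.UniformSplit(), splitsPerRegion);
        } else {
          // default: one split per region of the snapshotted table
          TableMapReduceUtil.initTableSnapshotMapperJob("mySnapshot", scan, mapper,
            ImmutableBytesWritable.class, NullWritable.class, job, false, restoreDir);
        }
      }
    }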
stopRow : Bytes.toBytes("\uffff")); - boolean localityEnabled = - job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, - SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); + boolean localityEnabled = job.getConfiguration().getBoolean( + SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_DEFAULT); boolean byRegionLoc = - job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, - SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); + job.getConfiguration().getBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION, + SNAPSHOT_INPUTFORMAT_LOCALITY_BY_REGION_LOCATION_DEFAULT); for (int i = 0; i < splits.size(); i++) { // validate input split InputSplit split = splits.get(i); @@ -545,17 +537,16 @@ public static void doTestWithMapReduce(HBaseTestingUtility util, TableName table job.setJarByClass(util.getClass()); TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(), - TestTableSnapshotInputFormat.class); + TestTableSnapshotInputFormat.class); if (numSplitsPerRegion > 1) { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(), - numSplitsPerRegion); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir, new RegionSplitter.UniformSplit(), numSplitsPerRegion); } else { - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, true, tableDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, scan, + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, + true, tableDir); } job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class); @@ -583,12 +574,12 @@ public void testCleanRestoreDir() throws Exception { createTableAndSnapshot(UTIL, tableName, snapshotName, getStartRow(), getEndRow(), 1); Job job = Job.getInstance(UTIL.getConfiguration()); Path workingDir = UTIL.getDataTestDirOnTestFS(snapshotName); - TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, - new Scan(), TestTableSnapshotMapper.class, ImmutableBytesWritable.class, - NullWritable.class, job, false, workingDir); + TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName, new Scan(), + TestTableSnapshotMapper.class, ImmutableBytesWritable.class, NullWritable.class, job, false, + workingDir); FileSystem fs = workingDir.getFileSystem(job.getConfiguration()); - Path restorePath = new Path(job.getConfiguration() - .get("hbase.TableSnapshotInputFormat.restore.dir")); + Path restorePath = + new Path(job.getConfiguration().get("hbase.TableSnapshotInputFormat.restore.dir")); Assert.assertTrue(fs.exists(restorePath)); TableSnapshotInputFormat.cleanRestoreDir(job, snapshotName); Assert.assertFalse(fs.exists(restorePath)); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java index efa0a1b7e691..19ab10821b85 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTableSplit.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or 
more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.junit.experimental.categories.Category; import org.junit.rules.TestName; -@Category({MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestTableSplit { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -46,11 +46,9 @@ public class TestTableSplit { @Test public void testHashCode() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location"); + "row-start".getBytes(), "row-end".getBytes(), "location"); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location"); + "row-start".getBytes(), "row-end".getBytes(), "location"); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); HashSet set = new HashSet<>(2); @@ -61,15 +59,13 @@ public void testHashCode() { /** * length of region should not influence hashcode - * */ + */ @Test public void testHashCode_length() { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location", 1984); + "row-start".getBytes(), "row-end".getBytes(), "location", 1984); TableSplit split2 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location", 1982); + "row-start".getBytes(), "row-end".getBytes(), "location", 1982); assertEquals(split1, split2); assertTrue(split1.hashCode() == split2.hashCode()); @@ -81,16 +77,14 @@ public void testHashCode_length() { /** * Length of region need to be properly serialized. 
- * */ + */ @Test public void testLengthIsSerialized() throws Exception { TableSplit split1 = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start".getBytes(), - "row-end".getBytes(), "location", 666); + "row-start".getBytes(), "row-end".getBytes(), "location", 666); TableSplit deserialized = new TableSplit(TableName.valueOf(name.getMethodName()), - "row-start2".getBytes(), - "row-end2".getBytes(), "location1"); + "row-start2".getBytes(), "row-end2".getBytes(), "location1"); ReflectionUtils.copy(new Configuration(), split1, deserialized); Assert.assertEquals(666, deserialized.getLength()); @@ -98,36 +92,26 @@ public void testLengthIsSerialized() throws Exception { @Test public void testToString() { - TableSplit split = - new TableSplit(TableName.valueOf(name.getMethodName()), "row-start".getBytes(), "row-end".getBytes(), - "location"); - String str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=)"; + TableSplit split = new TableSplit(TableName.valueOf(name.getMethodName()), + "row-start".getBytes(), "row-end".getBytes(), "location"); + String str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=)"; Assert.assertEquals(str, split.toString()); - split = - new TableSplit(TableName.valueOf(name.getMethodName()), null, "row-start".getBytes(), - "row-end".getBytes(), "location", "encoded-region-name", 1000L); - str = - "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " - + "endrow=row-end, regionLocation=location, " - + "regionname=encoded-region-name)"; + split = new TableSplit(TableName.valueOf(name.getMethodName()), null, "row-start".getBytes(), + "row-end".getBytes(), "location", "encoded-region-name", 1000L); + str = "Split(tablename=" + name.getMethodName() + ", startrow=row-start, " + + "endrow=row-end, regionLocation=location, " + "regionname=encoded-region-name)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=)"; Assert.assertEquals(str, split.toString()); split = new TableSplit(null, null, null, null, null, null, 1000L); - str = - "Split(tablename=null, startrow=null, " - + "endrow=null, regionLocation=null, " - + "regionname=null)"; + str = "Split(tablename=null, startrow=null, " + "endrow=null, regionLocation=null, " + + "regionname=null)"; Assert.assertEquals(str, split.toString()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java index 6c4d0b6f06b2..21d38f9bbad6 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestTimeRangeMapRed.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -60,7 +60,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MapReduceTests.class, LargeTests.class}) +@Category({ MapReduceTests.class, LargeTests.class }) public class TestTimeRangeMapRed { @ClassRule @@ -68,20 +68,19 @@ public class TestTimeRangeMapRed { HBaseClassTestRule.forClass(TestTimeRangeMapRed.class); private final static Logger log = LoggerFactory.getLogger(TestTimeRangeMapRed.class); - private static final HBaseTestingUtility UTIL = - new HBaseTestingUtility(); + private static final HBaseTestingUtility UTIL = new HBaseTestingUtility(); private Admin admin; - private static final byte [] KEY = Bytes.toBytes("row1"); + private static final byte[] KEY = Bytes.toBytes("row1"); private static final NavigableMap TIMESTAMP = new TreeMap<>(); static { - TIMESTAMP.put((long)1245620000, false); - TIMESTAMP.put((long)1245620005, true); // include - TIMESTAMP.put((long)1245620010, true); // include - TIMESTAMP.put((long)1245620055, true); // include - TIMESTAMP.put((long)1245620100, true); // include - TIMESTAMP.put((long)1245620150, false); - TIMESTAMP.put((long)1245620250, false); + TIMESTAMP.put((long) 1245620000, false); + TIMESTAMP.put((long) 1245620005, true); // include + TIMESTAMP.put((long) 1245620010, true); // include + TIMESTAMP.put((long) 1245620055, true); // include + TIMESTAMP.put((long) 1245620100, true); // include + TIMESTAMP.put((long) 1245620150, false); + TIMESTAMP.put((long) 1245620250, false); } static final long MINSTAMP = 1245620005; static final long MAXSTAMP = 1245620100 + 1; // maxStamp itself is excluded. so increment it. @@ -106,16 +105,13 @@ public void before() throws Exception { } private static class ProcessTimeRangeMapper - extends TableMapper - implements Configurable { + extends TableMapper implements Configurable { private Configuration conf = null; private Table table = null; @Override - public void map(ImmutableBytesWritable key, Result result, - Context context) - throws IOException { + public void map(ImmutableBytesWritable key, Result result, Context context) throws IOException { List tsList = new ArrayList<>(); for (Cell kv : result.listCells()) { tsList.add(kv.getTimestamp()); @@ -150,7 +146,7 @@ public void setConf(Configuration configuration) { @Test public void testTimeRangeMapRed() - throws IOException, InterruptedException, ClassNotFoundException { + throws IOException, InterruptedException, ClassNotFoundException { final HTableDescriptor desc = new HTableDescriptor(TABLE_NAME); final HColumnDescriptor col = new HColumnDescriptor(FAMILY_NAME); col.setMaxVersions(Integer.MAX_VALUE); @@ -170,8 +166,7 @@ public void testTimeRangeMapRed() table.close(); } - private void runTestOnTable() - throws IOException, InterruptedException, ClassNotFoundException { + private void runTestOnTable() throws IOException, InterruptedException, ClassNotFoundException { Job job = null; try { job = new Job(UTIL.getConfiguration(), "test123"); @@ -181,16 +176,15 @@ private void runTestOnTable() scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setTimeRange(MINSTAMP, MAXSTAMP); scan.setMaxVersions(); - TableMapReduceUtil.initTableMapperJob(TABLE_NAME, - scan, ProcessTimeRangeMapper.class, Text.class, Text.class, job); + TableMapReduceUtil.initTableMapperJob(TABLE_NAME, scan, ProcessTimeRangeMapper.class, + Text.class, Text.class, job); job.waitForCompletion(true); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } finally { if (job 
!= null) { - FileUtil.fullyDelete( - new File(job.getConfiguration().get("hadoop.tmp.dir"))); + FileUtil.fullyDelete(new File(job.getConfiguration().get("hadoop.tmp.dir"))); } } } @@ -200,11 +194,11 @@ private void verify(final Table table) throws IOException { scan.addColumn(FAMILY_NAME, COLUMN_NAME); scan.setMaxVersions(1); ResultScanner scanner = table.getScanner(scan); - for (Result r: scanner) { + for (Result r : scanner) { for (Cell kv : r.listCells()) { log.debug(Bytes.toString(r.getRow()) + "\t" + Bytes.toString(CellUtil.cloneFamily(kv)) - + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) - + "\t" + kv.getTimestamp() + "\t" + Bytes.toBoolean(CellUtil.cloneValue(kv))); + + "\t" + Bytes.toString(CellUtil.cloneQualifier(kv)) + "\t" + kv.getTimestamp() + "\t" + + Bytes.toBoolean(CellUtil.cloneValue(kv))); org.junit.Assert.assertEquals(TIMESTAMP.get(kv.getTimestamp()), Bytes.toBoolean(CellUtil.cloneValue(kv))); } @@ -213,4 +207,3 @@ private void verify(final Table table) throws IOException { } } - diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java index 48e85183923e..531a454cd005 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALInputFormat.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.mapreduce; import static org.junit.Assert.assertEquals; + import java.util.ArrayList; import java.util.List; import org.apache.hadoop.fs.FileStatus; @@ -32,11 +33,11 @@ import org.junit.experimental.categories.Category; import org.mockito.Mockito; -@Category({ MapReduceTests.class, SmallTests.class}) +@Category({ MapReduceTests.class, SmallTests.class }) public class TestWALInputFormat { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestWALInputFormat.class); + HBaseClassTestRule.forClass(TestWALInputFormat.class); /** * Test the primitive start/end time filtering. diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java index 49554515817b..94e18dc3bf84 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALPlayer.java @@ -24,6 +24,7 @@ import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; + import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; @@ -71,8 +72,8 @@ /** * Basic test for the WALPlayer M/R tool */ -@Category({MapReduceTests.class, LargeTests.class}) -//TODO : Remove this in 3.0 +@Category({ MapReduceTests.class, LargeTests.class }) +// TODO : Remove this in 3.0 public class TestWALPlayer { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -115,9 +116,9 @@ public void testPlayingRecoveredEdit() throws Exception { TEST_UTIL.createTable(tn, TestRecoveredEdits.RECOVEREDEDITS_COLUMNFAMILY); // Copy testing recovered.edits file that is over under hbase-server test resources // up into a dir in our little hdfs cluster here. 
- String hbaseServerTestResourcesEdits = System.getProperty("test.build.classes") + - "/../../../hbase-server/src/test/resources/" + - TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); + String hbaseServerTestResourcesEdits = + System.getProperty("test.build.classes") + "/../../../hbase-server/src/test/resources/" + + TestRecoveredEdits.RECOVEREDEDITS_PATH.getName(); assertTrue(new File(hbaseServerTestResourcesEdits).exists()); FileSystem dfs = TEST_UTIL.getDFSCluster().getFileSystem(); // Target dir. @@ -125,7 +126,7 @@ public void testPlayingRecoveredEdit() throws Exception { assertTrue(dfs.mkdirs(targetDir)); dfs.copyFromLocalFile(new Path(hbaseServerTestResourcesEdits), targetDir); assertEquals(0, - ToolRunner.run(new WALPlayer(this.conf), new String [] {targetDir.toString()})); + ToolRunner.run(new WALPlayer(this.conf), new String[] { targetDir.toString() })); // I don't know how many edits are in this file for this table... so just check more than 1. assertTrue(TEST_UTIL.countRows(tn) > 0); } @@ -157,19 +158,17 @@ public void testWALPlayer() throws Exception { // replay the WAL, map table 1 to table 2 WAL log = cluster.getRegionServer(0).getWAL(null); log.rollWriter(); - String walInputDir = new Path(cluster.getMaster().getMasterFileSystem() - .getWALRootDir(), HConstants.HREGION_LOGDIR_NAME).toString(); + String walInputDir = new Path(cluster.getMaster().getMasterFileSystem().getWALRootDir(), + HConstants.HREGION_LOGDIR_NAME).toString(); - Configuration configuration= TEST_UTIL.getConfiguration(); + Configuration configuration = TEST_UTIL.getConfiguration(); WALPlayer player = new WALPlayer(configuration); - String optionName="_test_.name"; + String optionName = "_test_.name"; configuration.set(optionName, "1000"); player.setupTime(configuration, optionName); - assertEquals(1000,configuration.getLong(optionName,0)); + assertEquals(1000, configuration.getLong(optionName, 0)); assertEquals(0, ToolRunner.run(configuration, player, - new String[] {walInputDir, tableName1.getNameAsString(), - tableName2.getNameAsString() })); - + new String[] { walInputDir, tableName1.getNameAsString(), tableName2.getNameAsString() })); // verify the WAL was player into table 2 Get g = new Get(ROW); @@ -233,7 +232,7 @@ public void testMainMethod() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); - LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); + LauncherSecurityManager newSecurityManager = new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; @@ -246,8 +245,8 @@ public void testMainMethod() throws Exception { } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of arguments:")); - assertTrue(data.toString().contains("Usage: WALPlayer [options] " + - " [ ]")); + assertTrue(data.toString() + .contains("Usage: WALPlayer [options] " + " [ ]")); assertTrue(data.toString().contains("-Dwal.bulk.output=/path/for/output")); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java index 41f8f351cfd9..0998a18fab10 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java +++ 
b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TestWALRecordReader.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.List; import java.util.NavigableMap; @@ -82,7 +83,7 @@ public class TestWALRecordReader { private static Path walRootDir; // visible for TestHLogRecordReader static final TableName tableName = TableName.valueOf(getName()); - private static final byte [] rowName = tableName.getName(); + private static final byte[] rowName = tableName.getName(); // visible for TestHLogRecordReader static final RegionInfo info = RegionInfoBuilder.newBuilder(tableName).build(); private static final byte[] family = Bytes.toBytes("column"); @@ -145,8 +146,8 @@ public void testPartialRead() throws Exception { edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), ts, value)); log.appendData(info, getWalKeyImpl(ts, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts+1, value)); - log.appendData(info, getWalKeyImpl(ts+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), ts + 1, value)); + log.appendData(info, getWalKeyImpl(ts + 1, scopes), edit); log.sync(); Threads.sleep(10); LOG.info("Before 1st WAL roll " + log.toString()); @@ -157,17 +158,16 @@ public void testPartialRead() throws Exception { long ts1 = EnvironmentEdgeManager.currentTime(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1+1, value)); - log.appendData(info, getWalKeyImpl(ts1+1, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("3"), ts1 + 1, value)); + log.appendData(info, getWalKeyImpl(ts1 + 1, scopes), edit); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1+2, value)); - log.appendData(info, getWalKeyImpl(ts1+2, scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("4"), ts1 + 2, value)); + log.appendData(info, getWalKeyImpl(ts1 + 2, scopes), edit); log.sync(); log.shutdown(); walfactory.shutdown(); LOG.info("Closed WAL " + log.toString()); - WALInputFormat input = new WALInputFormat(); Configuration jobConf = new Configuration(conf); jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString()); @@ -178,7 +178,7 @@ public void testPartialRead() throws Exception { assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); - jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1+1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, ts1 + 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(2, splits.size()); // Both entries from first file are in-range. 
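For reference, a minimal sketch of the time-window filtering that testPartialRead exercises above, written as a small helper. It assumes the same package and imports as TestWALRecordReader, and that WALInputFormat exposes a START_TIME_KEY lower-bound counterpart to the END_TIME_KEY used in the test; only WAL files with entries inside the window contribute splits.

  // Sketch only: would live in a test class whose methods may throw Exception.
  private static List<InputSplit> splitsInWindow(Configuration conf, Path logDir, long start,
    long end) throws Exception {
    Configuration jobConf = new Configuration(conf);
    jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
    jobConf.setLong(WALInputFormat.START_TIME_KEY, start); // assumed lower-bound key
    jobConf.setLong(WALInputFormat.END_TIME_KEY, end); // upper bound, as used in testPartialRead
    // WAL files whose entries all fall outside [start, end] are skipped when computing splits.
    return new WALInputFormat().getSplits(MapreduceTestingShim.createJobContext(jobConf));
  }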
@@ -201,12 +201,12 @@ public void testPartialRead() throws Exception { public void testWALRecordReader() throws Exception { final WALFactory walfactory = new WALFactory(conf, getName()); WAL log = walfactory.getWAL(info); - byte [] value = Bytes.toBytes("value"); + byte[] value = Bytes.toBytes("value"); WALEdit edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), - EnvironmentEdgeManager.currentTime(), value)); - long txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("1"), EnvironmentEdgeManager.currentTime(), + value)); + long txid = + log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); Thread.sleep(1); // make sure 2nd log gets a later timestamp @@ -214,10 +214,9 @@ public void testWALRecordReader() throws Exception { log.rollWriter(); edit = new WALEdit(); - edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), - EnvironmentEdgeManager.currentTime(), value)); - txid = log.appendData(info, - getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); + edit.add(new KeyValue(rowName, family, Bytes.toBytes("2"), EnvironmentEdgeManager.currentTime(), + value)); + txid = log.appendData(info, getWalKeyImpl(EnvironmentEdgeManager.currentTime(), scopes), edit); log.sync(txid); log.shutdown(); walfactory.shutdown(); @@ -240,7 +239,7 @@ public void testWALRecordReader() throws Exception { // now test basic time ranges: // set an endtime, the 2nd log file can be ignored completely. - jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs-1); + jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs - 1); splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf)); assertEquals(1, splits.size()); testSplit(splits.get(0), Bytes.toBytes("1")); @@ -283,8 +282,8 @@ private void testSplit(InputSplit split, byte[]... columns) throws Exception { } /** - * Create a new reader from the split, match the edits against the passed columns, - * moving WAL to archive in between readings + * Create a new reader from the split, match the edits against the passed columns, moving WAL to + * archive in between readings */ private void testSplitWithMovingWAL(InputSplit split, byte[] col1, byte[] col2) throws Exception { WALRecordReader reader = getReader(); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java index 8925615b5f27..eb5bb0c3b2f0 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapper.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.mapreduce; import java.io.IOException; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Durability; import org.apache.hadoop.hbase.client.Put; @@ -28,9 +27,9 @@ import org.apache.hadoop.io.Text; /** - * Dummy mapper used for unit tests to verify that the mapper can be injected. - * This approach would be used if a custom transformation needed to be done after - * reading the input data before writing it to HFiles. 
+ * Dummy mapper used for unit tests to verify that the mapper can be injected. This approach would + * be used if a custom transformation needed to be done after reading the input data before writing + * it to HFiles. */ public class TsvImporterCustomTestMapper extends TsvImporterMapper { @Override @@ -39,12 +38,11 @@ protected void setup(Context context) { } /** - * Convert a line of TSV text into an HBase table row after transforming the - * values by multiplying them by 3. + * Convert a line of TSV text into an HBase table row after transforming the values by multiplying + * them by 3. */ @Override - public void map(LongWritable offset, Text value, Context context) - throws IOException { + public void map(LongWritable offset, Text value, Context context) throws IOException { byte[] family = Bytes.toBytes("FAM"); final byte[][] qualifiers = { Bytes.toBytes("A"), Bytes.toBytes("B") }; @@ -53,20 +51,19 @@ public void map(LongWritable offset, Text value, Context context) String[] valueTokens = new String(lineBytes, "UTF-8").split("\u001b"); // create the rowKey and Put - ImmutableBytesWritable rowKey = - new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); + ImmutableBytesWritable rowKey = new ImmutableBytesWritable(Bytes.toBytes(valueTokens[0])); Put put = new Put(rowKey.copyBytes()); put.setDurability(Durability.SKIP_WAL); - //The value should look like this: VALUE1 or VALUE2. Let's multiply - //the integer by 3 - for(int i = 1; i < valueTokens.length; i++) { + // The value should look like this: VALUE1 or VALUE2. Let's multiply + // the integer by 3 + for (int i = 1; i < valueTokens.length; i++) { String prefix = valueTokens[i].substring(0, "VALUE".length()); String suffix = valueTokens[i].substring("VALUE".length()); String newValue = prefix + Integer.parseInt(suffix) * 3; - KeyValue kv = new KeyValue(rowKey.copyBytes(), family, - qualifiers[i-1], Bytes.toBytes(newValue)); + KeyValue kv = + new KeyValue(rowKey.copyBytes(), family, qualifiers[i - 1], Bytes.toBytes(newValue)); put.add(kv); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java index 850d4abac80b..cc38ebd58207 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/mapreduce/TsvImporterCustomTestMapperForOprAttr.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
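For reference, a minimal, self-contained sketch of how a custom mapper like the two shown here is usually injected into an ImportTsv run. The importtsv.mapper.class and importtsv.columns keys are standard ImportTsv settings, but the column spec, table name and input path below are illustrative assumptions only.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.mapreduce.ImportTsv;
  import org.apache.hadoop.util.ToolRunner;

  public class CustomTsvImportSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Swap in the custom mapper instead of the default TsvImporterMapper.
      conf.set("importtsv.mapper.class",
        "org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper");
      // First column is the row key; the remaining columns feed FAM:A and FAM:B.
      conf.set("importtsv.columns", "HBASE_ROW_KEY,FAM:A,FAM:B");
      int exit = ToolRunner.run(conf, new ImportTsv(), new String[] { "someTable", "/input/tsv" });
      System.exit(exit);
    }
  }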
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.IOException; import java.util.Arrays; - import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.mapreduce.ImportTsv.TsvParser.BadTsvLineException; @@ -27,8 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes; /** - * Just shows a simple example of how the attributes can be extracted and added - * to the puts + * Just shows a simple example of how the attributes can be extracted and added to the puts */ public class TsvImporterCustomTestMapperForOprAttr extends TsvImporterMapper { @Override diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java index 26cee49e89ee..97b8871bd36b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/regionserver/TestCompactionTool.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -44,7 +44,7 @@ public class TestCompactionTool { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestCompactionTool.class); + HBaseClassTestRule.forClass(TestCompactionTool.class); private final HBaseTestingUtility testUtil = new HBaseTestingUtility(); @@ -77,20 +77,20 @@ public void testCompactedFilesArchived() throws Exception { Path tableDir = CommonFSUtils.getTableDir(rootDir, region.getRegionInfo().getTable()); FileSystem fs = store.getFileSystem(); String storePath = tableDir + "/" + region.getRegionInfo().getEncodedName() + "/" - + Bytes.toString(HBaseTestingUtility.fam1); + + Bytes.toString(HBaseTestingUtility.fam1); FileStatus[] regionDirFiles = fs.listStatus(new Path(storePath)); assertEquals(10, regionDirFiles.length); String defaultFS = testUtil.getMiniHBaseCluster().getConfiguration().get("fs.defaultFS"); Configuration config = HBaseConfiguration.create(); config.set("fs.defaultFS", defaultFS); int result = ToolRunner.run(config, new CompactionTool(), - new String[]{"-compactOnce", "-major", storePath}); - assertEquals(0,result); + new String[] { "-compactOnce", "-major", storePath }); + assertEquals(0, result); regionDirFiles = fs.listStatus(new Path(storePath)); assertEquals(1, regionDirFiles.length); } - private void putAndFlush(int key) throws Exception{ + private void putAndFlush(int key) throws Exception { Put put = new Put(Bytes.toBytes(key)); put.addColumn(HBaseTestingUtility.fam1, qualifier, Bytes.toBytes("val" + key)); region.put(put); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java index 04b766df435f..4d6b57724eb5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplication.java @@ -95,9 +95,10 @@ public void setUp() throws Exception { public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - 
ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { @@ -249,7 +250,6 @@ static void checkRestoreTmpDir(Configuration conf, String restoreTmpDir, int exp } } - @Test public void testVerifyRepJobWithQuorumAddress() throws Exception { // Populate the tables, at the same time it guarantees that the tables are @@ -300,10 +300,10 @@ public void testVerifyRepJobWithQuorumAddressAndSnapshotSupport() throws Excepti String tmpPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); checkRestoreTmpDir(CONF1, tmpPath1, 1); checkRestoreTmpDir(CONF2, tmpPath2, 1); @@ -330,10 +330,10 @@ public void testVerifyRepJobWithQuorumAddressAndSnapshotSupport() throws Excepti Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); checkRestoreTmpDir(CONF1, tmpPath1, 2); checkRestoreTmpDir(CONF2, tmpPath2, 2); @@ -391,25 +391,25 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti FileSystem fs = rootDir.getFileSystem(CONF1); String sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); // Take target snapshot Path peerRootDir = CommonFSUtils.getRootDir(CONF2); FileSystem peerFs = peerRootDir.getFileSystem(CONF2); String peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); String peerFSAddress = peerFs.getUri().toString(); String tmpPath1 = UTIL1.getRandomDir().toString(); String tmpPath2 = "/tmp" + 
EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); checkRestoreTmpDir(CONF1, tmpPath1, 1); checkRestoreTmpDir(CONF2, tmpPath2, 1); @@ -421,7 +421,7 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti put = new Put(result.getRow()); Cell firstVal = result.rawCells()[0]; put.addColumn(CellUtil.cloneFamily(firstVal), CellUtil.cloneQualifier(firstVal), - Bytes.toBytes("diff data")); + Bytes.toBytes("diff data")); htable3.put(put); } Delete delete = new Delete(put.getRow()); @@ -429,18 +429,18 @@ public void testVerifyRepJobWithPeerTableNameAndSnapshotSupport() throws Excepti sourceSnapshotName = "sourceSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL1.getAdmin(), tableName, - Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); + Bytes.toString(noRepfamName), sourceSnapshotName, rootDir, fs, true); peerSnapshotName = "peerSnapshot-" + EnvironmentEdgeManager.currentTime(); SnapshotTestingUtils.createSnapshotAndValidate(UTIL2.getAdmin(), peerTableName, - Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); + Bytes.toString(noRepfamName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--peerTableName=" + peerTableName.getNameAsString(), - "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + tmpPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + tmpPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), - tableName.getNameAsString() }; + "--sourceSnapshotName=" + sourceSnapshotName, "--sourceSnapshotTmpDir=" + tmpPath1, + "--peerSnapshotName=" + peerSnapshotName, "--peerSnapshotTmpDir=" + tmpPath2, + "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), UTIL2.getClusterKey(), + tableName.getNameAsString() }; runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); checkRestoreTmpDir(CONF1, tmpPath1, 2); checkRestoreTmpDir(CONF2, tmpPath2, 2); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java index 51a0748b0d61..2523f3234a1e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationAdjunct.java @@ -90,9 +90,10 @@ public void setUp() throws Exception { @BeforeClass public static void setUpBeforeClass() throws Exception { TestReplicationBase.setUpBeforeClass(); - TableDescriptor peerTable = 
TableDescriptorBuilder.newBuilder(peerTableName).setColumnFamily( - ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100) - .build()).build(); + TableDescriptor peerTable = TableDescriptorBuilder.newBuilder(peerTableName) + .setColumnFamily( + ColumnFamilyDescriptorBuilder.newBuilder(noRepfamName).setMaxVersions(100).build()) + .build(); Connection connection2 = ConnectionFactory.createConnection(CONF2); try (Admin admin2 = connection2.getAdmin()) { admin2.createTable(peerTable, HBaseTestingUtility.KEYS_FOR_HBA_CREATE_TABLE); @@ -303,10 +304,10 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { String temPath2 = "/tmp" + EnvironmentEdgeManager.currentTime(); String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", + tableName.getNameAsString() }; TestVerifyReplication.runVerifyReplication(args, NB_ROWS_IN_BATCH, 0); TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 1); TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 1); @@ -333,10 +334,10 @@ public void testVerifyReplicationWithSnapshotSupport() throws Exception { Bytes.toString(famName), peerSnapshotName, peerRootDir, peerFs, true); args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", - tableName.getNameAsString() }; + "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(CONF2), "2", + tableName.getNameAsString() }; TestVerifyReplication.runVerifyReplication(args, 0, NB_ROWS_IN_BATCH); TestVerifyReplication.checkRestoreTmpDir(CONF1, temPath1, 2); TestVerifyReplication.checkRestoreTmpDir(CONF2, temPath2, 2); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java index 1609865d737d..0c160a4670f1 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationCrossDiffHdfs.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; @@ -182,9 +181,10 @@ public void testVerifyRepBySnapshot() throws Exception { String temPath2 = "/tmp2"; String[] args = new String[] { "--sourceSnapshotName=" + sourceSnapshotName, - "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, - "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, - "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(conf2), PEER_ID, TABLE_NAME.toString() }; + "--sourceSnapshotTmpDir=" + temPath1, "--peerSnapshotName=" + peerSnapshotName, + "--peerSnapshotTmpDir=" + temPath2, "--peerFSAddress=" + peerFSAddress, + "--peerHBaseRootAddress=" + CommonFSUtils.getRootDir(conf2), PEER_ID, + TABLE_NAME.toString() }; // Use the yarn's config override the source cluster's config. Configuration newConf = HBaseConfiguration.create(conf1); diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java index 7df9640b5255..f65265ec3489 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/replication/TestVerifyReplicationSecureClusterCredentials.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +18,7 @@ package org.apache.hadoop.hbase.replication; import static org.junit.Assert.assertEquals; + import java.io.File; import java.io.IOException; import java.util.Arrays; @@ -62,14 +62,14 @@ public class TestVerifyReplicationSecureClusterCredentials { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestVerifyReplicationSecureClusterCredentials.class); + HBaseClassTestRule.forClass(TestVerifyReplicationSecureClusterCredentials.class); private static MiniKdc KDC; private static final HBaseTestingUtility UTIL1 = new HBaseTestingUtility(); private static final HBaseTestingUtility UTIL2 = new HBaseTestingUtility(); private static final File KEYTAB_FILE = - new File(UTIL1.getDataTestDir("keytab").toUri().getPath()); + new File(UTIL1.getDataTestDir("keytab").toUri().getPath()); private static final String LOCALHOST = "localhost"; private static String CLUSTER_PRINCIPAL; @@ -96,8 +96,8 @@ private static void setupCluster(HBaseTestingUtility util) throws Exception { conf.set(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AccessController.class.getName() + ',' + TokenProvider.class.getName()); - HBaseKerberosUtils.setSecuredConfiguration(conf, - CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), HTTP_PRINCIPAL + '@' + KDC.getRealm()); + HBaseKerberosUtils.setSecuredConfiguration(conf, CLUSTER_PRINCIPAL + '@' + KDC.getRealm(), + HTTP_PRINCIPAL + '@' + KDC.getRealm()); util.startMiniCluster(); } @@ -112,13 +112,14 @@ public static void beforeClass() throws Exception { setupCluster(UTIL2); try (Admin admin = UTIL1.getAdmin()) { - admin.addReplicationPeer("1", ReplicationPeerConfig.newBuilder() - .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) - .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) - 
.putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, - UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) - .build()); + admin.addReplicationPeer("1", + ReplicationPeerConfig.newBuilder() + .setClusterKey(ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())) + .putConfiguration(HBaseKerberosUtils.KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.KRB_PRINCIPAL)) + .putConfiguration(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL, + UTIL2.getConfiguration().get(HBaseKerberosUtils.MASTER_KRB_PRINCIPAL)) + .build()); } } @@ -130,10 +131,8 @@ public static void cleanup() throws IOException { @Parameters public static Collection> peer() { - return Arrays.asList( - () -> "1", - () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration()) - ); + return Arrays.asList(() -> "1", + () -> ZKConfig.getZooKeeperClusterKey(UTIL2.getConfiguration())); } @Parameter @@ -143,11 +142,7 @@ public static Collection> peer() { @SuppressWarnings("unchecked") public void testJobCredentials() throws Exception { Job job = new VerifyReplication().createSubmittableJob( - new Configuration(UTIL1.getConfiguration()), - new String[] { - peer.get(), - "table" - }); + new Configuration(UTIL1.getConfiguration()), new String[] { peer.get(), "table" }); Credentials credentials = job.getCredentials(); Collection> tokens = credentials.getAllTokens(); @@ -155,12 +150,12 @@ public void testJobCredentials() throws Exception { String clusterId1 = ZKClusterId.readClusterIdZNode(UTIL1.getZooKeeperWatcher()); Token tokenForCluster1 = - (Token) credentials.getToken(new Text(clusterId1)); + (Token) credentials.getToken(new Text(clusterId1)); assertEquals(FULL_USER_PRINCIPAL, tokenForCluster1.decodeIdentifier().getUsername()); String clusterId2 = ZKClusterId.readClusterIdZNode(UTIL2.getZooKeeperWatcher()); Token tokenForCluster2 = - (Token) credentials.getToken(new Text(clusterId2)); + (Token) credentials.getToken(new Text(clusterId2)); assertEquals(FULL_USER_PRINCIPAL, tokenForCluster2.decodeIdentifier().getUsername()); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java index 3560ca4162bf..fc928d9c9ee3 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshot.java @@ -64,7 +64,7 @@ * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshot { @ClassRule @@ -190,31 +190,30 @@ public void testExportWithTargetName() throws Exception { private void testExportFileSystemState(final TableName tableName, final byte[] snapshotName, final byte[] targetName, int filesExpected) throws Exception { - testExportFileSystemState(tableName, snapshotName, targetName, - filesExpected, getHdfsDestinationDir(), false); + testExportFileSystemState(tableName, snapshotName, targetName, filesExpected, + getHdfsDestinationDir(), false); } - protected void testExportFileSystemState(final TableName tableName, - final byte[] snapshotName, final byte[] targetName, int filesExpected, - Path copyDir, boolean overwrite) throws Exception { + protected void testExportFileSystemState(final TableName tableName, final byte[] snapshotName, + final byte[] targetName, int filesExpected, Path copyDir, boolean overwrite) + 
throws Exception { testExportFileSystemState(TEST_UTIL.getConfiguration(), tableName, snapshotName, targetName, - filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, - overwrite, getBypassRegionPredicate(), true); + filesExpected, TEST_UTIL.getDefaultRootDirPath(), copyDir, overwrite, + getBypassRegionPredicate(), true); } /** * Creates destination directory, runs ExportSnapshot() tool, and runs some verifications. */ protected static void testExportFileSystemState(final Configuration conf, - final TableName tableName, - final byte[] snapshotName, final byte[] targetName, final int filesExpected, - final Path srcDir, Path rawTgtDir, final boolean overwrite, + final TableName tableName, final byte[] snapshotName, final byte[] targetName, + final int filesExpected, final Path srcDir, Path rawTgtDir, final boolean overwrite, final RegionPredicate bypassregionPredicate, boolean success) throws Exception { FileSystem tgtFs = rawTgtDir.getFileSystem(conf); FileSystem srcFs = srcDir.getFileSystem(conf); Path tgtDir = rawTgtDir.makeQualified(tgtFs.getUri(), tgtFs.getWorkingDirectory()); - LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", - tgtFs.getUri(), tgtDir, rawTgtDir, srcFs.getUri(), srcDir); + LOG.info("tgtFsUri={}, tgtDir={}, rawTgtDir={}, srcFsUri={}, srcDir={}", tgtFs.getUri(), tgtDir, + rawTgtDir, srcFs.getUri(), srcDir); List opts = new ArrayList<>(); opts.add("--snapshot"); opts.add(Bytes.toString(snapshotName)); @@ -242,11 +241,11 @@ protected static void testExportFileSystemState(final Configuration conf, // Verify File-System state FileStatus[] rootFiles = tgtFs.listStatus(tgtDir); assertEquals(filesExpected > 0 ? 2 : 1, rootFiles.length); - for (FileStatus fileStatus: rootFiles) { + for (FileStatus fileStatus : rootFiles) { String name = fileStatus.getPath().getName(); assertTrue(fileStatus.toString(), fileStatus.isDirectory()); - assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) || - name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); + assertTrue(name.toString(), name.equals(HConstants.SNAPSHOT_DIR_NAME) + || name.equals(HConstants.HFILE_ARCHIVE_DIRECTORY)); } LOG.info("Verified filesystem state"); @@ -273,12 +272,12 @@ protected static void verifySnapshotDir(final FileSystem fs1, final Path root1, protected static Set verifySnapshot(final Configuration conf, final FileSystem fs, final Path rootDir, final TableName tableName, final String snapshotName, final RegionPredicate bypassregionPredicate) throws IOException { - final Path exportedSnapshot = new Path(rootDir, - new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); + final Path exportedSnapshot = + new Path(rootDir, new Path(HConstants.SNAPSHOT_DIR_NAME, snapshotName)); final Set snapshotFiles = new HashSet<>(); final Path exportedArchive = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY); SnapshotReferenceUtil.visitReferencedFiles(conf, fs, exportedSnapshot, - new SnapshotReferenceUtil.SnapshotVisitor() { + new SnapshotReferenceUtil.SnapshotVisitor() { @Override public void storeFile(final RegionInfo regionInfo, final String family, final SnapshotRegionManifest.StoreFile storeFile) throws IOException { @@ -290,8 +289,8 @@ public void storeFile(final RegionInfo regionInfo, final String family, snapshotFiles.add(hfile); if (!storeFile.hasReference()) { verifyNonEmptyFile(new Path(exportedArchive, - new Path(CommonFSUtils.getTableDir(new Path("./"), tableName), - new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); + new Path(CommonFSUtils.getTableDir(new 
Path("./"), tableName), + new Path(regionInfo.getEncodedName(), new Path(family, hfile))))); } } @@ -315,7 +314,7 @@ private static Set listFiles(final FileSystem fs, final Path root, final int rootPrefix = root.makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString().length(); FileStatus[] list = CommonFSUtils.listStatus(fs, dir); if (list != null) { - for (FileStatus fstat: list) { + for (FileStatus fstat : list) { LOG.debug(Objects.toString(fstat.getPath())); if (fstat.isDirectory()) { files.addAll(listFiles(fs, root, fstat.getPath())); @@ -329,8 +328,8 @@ private static Set listFiles(final FileSystem fs, final Path root, final private Path getHdfsDestinationDir() { Path rootDir = TEST_UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir(); - Path path = new Path(new Path(rootDir, "export-test"), "export-" + - EnvironmentEdgeManager.currentTime()); + Path path = new Path(new Path(rootDir, "export-test"), + "export-" + EnvironmentEdgeManager.currentTime()); LOG.info("HDFS export destination path: " + path); return path; } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java index 7238433d4db0..d281ec586891 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotAdjunct.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hbase.snapshot; import static org.junit.Assert.assertFalse; + import java.util.Iterator; import java.util.Map; import org.apache.hadoop.conf.Configuration; @@ -44,13 +45,13 @@ import org.slf4j.LoggerFactory; /** - * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but - * the test suite ran too close to the maximum time limit so we split these out. Uses - * facility from TestExportSnapshot where possible. + * Tests that are adjunct to {@link TestExportSnapshot}. They used to be in same test suite but the + * test suite ran too close to the maximum time limit so we split these out. Uses facility from + * TestExportSnapshot where possible. * @see TestExportSnapshot */ @Ignore // HBASE-24493 -@Category({VerySlowMapReduceTests.class, LargeTests.class}) +@Category({ VerySlowMapReduceTests.class, LargeTests.class }) public class TestExportSnapshotAdjunct { private static final Logger LOG = LoggerFactory.getLogger(TestExportSnapshotAdjunct.class); @@ -76,12 +77,11 @@ public static void setUpBeforeClass() throws Exception { } /** - * Check for references to '/tmp'. We are trying to avoid having references to outside of the - * test data dir when running tests. References outside of the test dir makes it so concurrent - * tests can stamp on each other by mistake. This check is for references to the 'tmp'. - * - * This is a strange place for this test but I want somewhere where the configuration is - * full -- filed w/ hdfs and mapreduce configurations. + * Check for references to '/tmp'. We are trying to avoid having references to outside of the test + * data dir when running tests. References outside of the test dir makes it so concurrent tests + * can stamp on each other by mistake. This check is for references to the 'tmp'. This is a + * strange place for this test but I want somewhere where the configuration is full -- filed w/ + * hdfs and mapreduce configurations. 
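For reference, a minimal sketch of the kind of scan such a '/tmp' check can perform, relying only on the fact that Hadoop's Configuration iterates as key/value pairs; the helper name is illustrative and this is not the test's exact implementation.

  import java.util.Map;
  import org.apache.hadoop.conf.Configuration;

  final class TmpReferenceCheckSketch {
    /** Returns the first configuration key whose value mentions "/tmp", or null if none does. */
    static String findTmpReference(Configuration conf) {
      for (Map.Entry<String, String> entry : conf) {
        String value = entry.getValue();
        if (value != null && value.contains("/tmp")) {
          return entry.getKey();
        }
      }
      return null;
    }
  }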
*/ private void checkForReferencesToTmpDir() { Configuration conf = TEST_UTIL.getConfiguration(); @@ -127,8 +127,7 @@ public void setUp() throws Exception { admin.snapshot(emptySnapshotName, tableName); // Add some rows - SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, - TestExportSnapshot.FAMILY); + SnapshotTestingUtils.loadData(TEST_UTIL, tableName, 50, TestExportSnapshot.FAMILY); tableNumFiles = admin.getRegions(tableName).size(); // take a snapshot @@ -152,9 +151,8 @@ public void testExportRetry() throws Exception { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 2); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName), - tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, + TestExportSnapshot.testExportFileSystemState(conf, tableName, Bytes.toBytes(snapshotName), + Bytes.toBytes(snapshotName), tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, true); } @@ -170,8 +168,8 @@ public void testExportFailure() throws Exception { conf.setBoolean(ExportSnapshot.Testing.CONF_TEST_FAILURE, true); conf.setInt(ExportSnapshot.Testing.CONF_TEST_FAILURE_COUNT, 4); conf.setInt("mapreduce.map.maxattempts", 3); - TestExportSnapshot.testExportFileSystemState(conf, tableName, - Bytes.toBytes(snapshotName), Bytes.toBytes(snapshotName), - tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, null, false); + TestExportSnapshot.testExportFileSystemState(conf, tableName, Bytes.toBytes(snapshotName), + Bytes.toBytes(snapshotName), tableNumFiles, TEST_UTIL.getDefaultRootDirPath(), copyDir, true, + null, false); } } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java index d104d830985b..faa1094147ce 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotHelpers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ /** * Test Export Snapshot Tool helpers */ -@Category({RegionServerTests.class, SmallTests.class}) +@Category({ RegionServerTests.class, SmallTests.class }) public class TestExportSnapshotHelpers { @ClassRule @@ -42,44 +42,39 @@ public class TestExportSnapshotHelpers { HBaseClassTestRule.forClass(TestExportSnapshotHelpers.class); /** - * Verfy the result of getBalanceSplits() method. - * The result are groups of files, used as input list for the "export" mappers. - * All the groups should have similar amount of data. - * - * The input list is a pair of file path and length. - * The getBalanceSplits() function sort it by length, - * and assign to each group a file, going back and forth through the groups. + * Verfy the result of getBalanceSplits() method. The result are groups of files, used as input + * list for the "export" mappers. All the groups should have similar amount of data. The input + * list is a pair of file path and length. The getBalanceSplits() function sort it by length, and + * assign to each group a file, going back and forth through the groups. 
*/ @Test public void testBalanceSplit() throws Exception { // Create a list of files List> files = new ArrayList<>(21); for (long i = 0; i <= 20; i++) { - SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder() - .setType(SnapshotFileInfo.Type.HFILE) - .setHfile("file-" + i) - .build(); + SnapshotFileInfo fileInfo = SnapshotFileInfo.newBuilder().setType(SnapshotFileInfo.Type.HFILE) + .setHfile("file-" + i).build(); files.add(new Pair<>(fileInfo, i)); } // Create 5 groups (total size 210) - // group 0: 20, 11, 10, 1 (total size: 42) - // group 1: 19, 12, 9, 2 (total size: 42) - // group 2: 18, 13, 8, 3 (total size: 42) - // group 3: 17, 12, 7, 4 (total size: 42) - // group 4: 16, 11, 6, 5 (total size: 42) + // group 0: 20, 11, 10, 1 (total size: 42) + // group 1: 19, 12, 9, 2 (total size: 42) + // group 2: 18, 13, 8, 3 (total size: 42) + // group 3: 17, 12, 7, 4 (total size: 42) + // group 4: 16, 11, 6, 5 (total size: 42) List>> splits = ExportSnapshot.getBalancedSplits(files, 5); assertEquals(5, splits.size()); - String[] split0 = new String[] {"file-20", "file-11", "file-10", "file-1", "file-0"}; + String[] split0 = new String[] { "file-20", "file-11", "file-10", "file-1", "file-0" }; verifyBalanceSplit(splits.get(0), split0, 42); - String[] split1 = new String[] {"file-19", "file-12", "file-9", "file-2"}; + String[] split1 = new String[] { "file-19", "file-12", "file-9", "file-2" }; verifyBalanceSplit(splits.get(1), split1, 42); - String[] split2 = new String[] {"file-18", "file-13", "file-8", "file-3"}; + String[] split2 = new String[] { "file-18", "file-13", "file-8", "file-3" }; verifyBalanceSplit(splits.get(2), split2, 42); - String[] split3 = new String[] {"file-17", "file-14", "file-7", "file-4"}; + String[] split3 = new String[] { "file-17", "file-14", "file-7", "file-4" }; verifyBalanceSplit(splits.get(3), split3, 42); - String[] split4 = new String[] {"file-16", "file-15", "file-6", "file-5"}; + String[] split4 = new String[] { "file-16", "file-15", "file-6", "file-5" }; verifyBalanceSplit(splits.get(4), split4, 42); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java index 2ab9f11adef8..3bb641d13f9b 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV1NoCluster.java @@ -41,12 +41,11 @@ import org.slf4j.LoggerFactory; /** - * Test Export Snapshot Tool - * Tests V1 snapshots only. Used to ALSO test v2 but strange failure so separate the tests. - * See companion file for test of v2 snapshot. + * Test Export Snapshot Tool Tests V1 snapshots only. Used to ALSO test v2 but strange failure so + * separate the tests. See companion file for test of v2 snapshot. 
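For reference, a self-contained sketch of the balanced grouping described in the testBalanceSplit comments above: sort the sizes in descending order and deal them out to the groups back and forth, repeating the end groups at each turn so the per-group totals stay close. This illustrates the idea only; the real ExportSnapshot.getBalancedSplits() works on (SnapshotFileInfo, length) pairs.

  import java.util.ArrayList;
  import java.util.Comparator;
  import java.util.List;

  final class BalancedSplitSketch {
    /** Deals the sizes into {@code groups} lists so the per-group totals stay close. */
    static List<List<Long>> balance(List<Long> fileSizes, int groups) {
      List<List<Long>> result = new ArrayList<>();
      for (int i = 0; i < groups; i++) {
        result.add(new ArrayList<>());
      }
      List<Long> sorted = new ArrayList<>(fileSizes);
      sorted.sort(Comparator.reverseOrder());
      int g = 0;
      int dir = 1;
      for (Long size : sorted) {
        result.get(g).add(size);
        g += dir;
        if (g == groups) { // bounce off the last group, which gets two files in a row
          g = groups - 1;
          dir = -1;
        } else if (g < 0) { // bounce off the first group
          g = 0;
          dir = 1;
        }
      }
      return result;
    }
  }

With sizes 0 through 20 and five groups this reproduces the 42-per-group assignment asserted in testBalanceSplit.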
* @see TestExportSnapshotV2NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV1NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -88,36 +87,36 @@ static Path setup(FileSystem fs, HBaseCommonTestingUtility hctu) throws IOExcept */ @Test public void testSnapshotWithRefsExportFileSystemState() throws Exception { - final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), - this.fs, testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV1("tableWithRefsV1", - "tableWithRefsV1"); + final SnapshotMock snapshotMock = + new SnapshotMock(testUtil.getConfiguration(), this.fs, testDir); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV1("tableWithRefsV1", "tableWithRefsV1"); testSnapshotWithRefsExportFileSystemState(this.fs, builder, testUtil, testDir); } /** - * Generates a couple of regions for the specified SnapshotMock, - * and then it will run the export and verification. + * Generates a couple of regions for the specified SnapshotMock, and then it will run the export + * and verification. */ static void testSnapshotWithRefsExportFileSystemState(FileSystem fs, - SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtility testUtil, Path testDir) - throws Exception { + SnapshotMock.SnapshotBuilder builder, HBaseCommonTestingUtility testUtil, Path testDir) + throws Exception { Path[] r1Files = builder.addRegion(); Path[] r2Files = builder.addRegion(); builder.commit(); int snapshotFilesCount = r1Files.length + r2Files.length; byte[] snapshotName = Bytes.toBytes(builder.getSnapshotDescription().getName()); TableName tableName = builder.getTableDescriptor().getTableName(); - TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), - tableName, snapshotName, snapshotName, snapshotFilesCount, - testDir, getDestinationDir(fs, testUtil, testDir), false, null, true); + TestExportSnapshot.testExportFileSystemState(testUtil.getConfiguration(), tableName, + snapshotName, snapshotName, snapshotFilesCount, testDir, + getDestinationDir(fs, testUtil, testDir), false, null, true); } static Path getDestinationDir(FileSystem fs, HBaseCommonTestingUtility hctu, Path testDir) throws IOException { - Path path = new Path(new Path(testDir, "export-test"), - "export-" + EnvironmentEdgeManager.currentTime()).makeQualified(fs.getUri(), - fs.getWorkingDirectory()); + Path path = + new Path(new Path(testDir, "export-test"), "export-" + EnvironmentEdgeManager.currentTime()) + .makeQualified(fs.getUri(), fs.getWorkingDirectory()); LOG.info("Export destination={}, fs={}, fsurl={}, fswd={}, testDir={}", path, fs, fs.getUri(), fs.getWorkingDirectory(), testDir); return path; diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java index 4cd1dfdfb71a..08f8a5d682be 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotV2NoCluster.java @@ -16,7 +16,9 @@ * limitations under the License. 
*/ package org.apache.hadoop.hbase.snapshot; + import static org.junit.Assert.assertTrue; + import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; @@ -36,7 +38,7 @@ * Test Export Snapshot Tool; tests v2 snapshots. * @see TestExportSnapshotV1NoCluster */ -@Category({MapReduceTests.class, MediumTests.class}) +@Category({ MapReduceTests.class, MediumTests.class }) public class TestExportSnapshotV2NoCluster { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -60,9 +62,9 @@ public void before() throws Exception { @Test public void testSnapshotWithRefsExportFileSystemState() throws Exception { final SnapshotMock snapshotMock = new SnapshotMock(testUtil.getConfiguration(), - testDir.getFileSystem(testUtil.getConfiguration()), testDir); - final SnapshotMock.SnapshotBuilder builder = snapshotMock.createSnapshotV2("tableWithRefsV2", - "tableWithRefsV2"); + testDir.getFileSystem(testUtil.getConfiguration()), testDir); + final SnapshotMock.SnapshotBuilder builder = + snapshotMock.createSnapshotV2("tableWithRefsV2", "tableWithRefsV2"); TestExportSnapshotV1NoCluster.testSnapshotWithRefsExportFileSystemState(this.fs, builder, this.testUtil, this.testDir); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java index 5560555e9f33..0e21f9c6b9d5 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestExportSnapshotWithTemporaryDirectory.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - *
<p>
    - * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
    + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -31,7 +31,7 @@ import org.junit.experimental.categories.Category; @Ignore // HBASE-24493 -@Category({MediumTests.class}) +@Category({ MediumTests.class }) public class TestExportSnapshotWithTemporaryDirectory extends TestExportSnapshot { @ClassRule @@ -54,8 +54,8 @@ public static void setUpBaseConf(Configuration conf) { Path tmpDir = null; try { FileSystem localFs = FileSystem.getLocal(conf); - tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()). - makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); + tmpDir = TEST_UTIL.getDataTestDir(UUID.randomUUID().toString()) + .makeQualified(localFs.getUri(), localFs.getWorkingDirectory()); } catch (IOException ioe) { throw new RuntimeException(ioe); } diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java index 4f0d3deebe20..dc92c03c4f65 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ * Test Export Snapshot Tool */ @Ignore // HBASE-24493 -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobExportSnapshot extends TestExportSnapshot { @ClassRule diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java index 484f88afecf4..c57dea3cd36e 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestMobSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ /** * Reruns TestMobExportSnapshot using MobExportSnapshot in secure mode. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestMobSecureExportSnapshot extends TestMobExportSnapshot { @ClassRule diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java index ce1c4cb39a04..9d813a173724 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/snapshot/TestSecureExportSnapshot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -31,7 +31,7 @@ /** * Reruns TestExportSnapshot using ExportSnapshot in secure mode. */ -@Category({VerySlowRegionServerTests.class, LargeTests.class}) +@Category({ VerySlowRegionServerTests.class, LargeTests.class }) public class TestSecureExportSnapshot extends TestExportSnapshot { @ClassRule diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java index 5e61a4b1268e..baaa3211d018 100644 --- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java +++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/util/LoadTestTool.java @@ -1,18 +1,19 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package org.apache.hadoop.hbase.util; @@ -68,8 +69,8 @@ /** * A command-line utility that reads, writes, and verifies data. Unlike - * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, - * and supports simultaneously writing and reading the same set of keys. + * {@link org.apache.hadoop.hbase.PerformanceEvaluation}, this tool validates the data written, and + * supports simultaneously writing and reading the same set of keys. 
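The class javadoc above summarizes what LoadTestTool does: write, read and verify the same key space, optionally at the same time. As a rough usage illustration only: the table name, column count, data size and thread counts below are invented, and the option spellings (-tn, -write <avg_cols_per_key>:<avg_data_size>[:<#threads>], -read <verify_percent>[:<#threads>], -num_keys) should be double-checked against the tool's own -h output for the build in question.

hbase org.apache.hadoop.hbase.util.LoadTestTool -tn loadtest -write 5:1024:10 -read 100:10 -num_keys 1000000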
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.TOOLS) public class LoadTestTool extends AbstractHBaseTool { @@ -94,23 +95,21 @@ public class LoadTestTool extends AbstractHBaseTool { /** Usage string for the load option */ protected static final String OPT_USAGE_LOAD = - ":" + - "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; + ":" + "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the read option */ protected static final String OPT_USAGE_READ = "[:<#threads=" + DEFAULT_NUM_THREADS + ">]"; /** Usage string for the update option */ - protected static final String OPT_USAGE_UPDATE = - "[:<#threads=" + DEFAULT_NUM_THREADS - + ">][:<#whether to ignore nonce collisions=0>]"; + protected static final String OPT_USAGE_UPDATE = "[:<#threads=" + + DEFAULT_NUM_THREADS + ">][:<#whether to ignore nonce collisions=0>]"; - protected static final String OPT_USAGE_BLOOM = "Bloom filter type, one of " + - Arrays.toString(BloomType.values()); + protected static final String OPT_USAGE_BLOOM = + "Bloom filter type, one of " + Arrays.toString(BloomType.values()); - protected static final String OPT_USAGE_COMPRESSION = "Compression type, " + - "one of " + Arrays.toString(Compression.Algorithm.values()); + protected static final String OPT_USAGE_COMPRESSION = + "Compression type, " + "one of " + Arrays.toString(Compression.Algorithm.values()); protected static final String OPT_VERBOSE = "verbose"; @@ -121,8 +120,8 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_DEFERRED_LOG_FLUSH_USAGE = "Enable deferred log flush."; public static final String OPT_INMEMORY = "in_memory"; - public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + - "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; + public static final String OPT_USAGE_IN_MEMORY = "Tries to keep the HFiles of the CF " + + "inmemory as far as possible. Not guaranteed that reads are always served from inmemory"; public static final String OPT_GENERATOR = "generator"; public static final String OPT_GENERATOR_USAGE = "The class which generates load for the tool." @@ -157,12 +156,12 @@ public class LoadTestTool extends AbstractHBaseTool { public static final String OPT_ENCRYPTION = "encryption"; protected static final String OPT_ENCRYPTION_USAGE = - "Enables transparent encryption on the test table, one of " + - Arrays.toString(Encryption.getSupportedCiphers()); + "Enables transparent encryption on the test table, one of " + + Arrays.toString(Encryption.getSupportedCiphers()); public static final String OPT_NUM_REGIONS_PER_SERVER = "num_regions_per_server"; - protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE - = "Desired number of regions per region server. Defaults to 5."; + protected static final String OPT_NUM_REGIONS_PER_SERVER_USAGE = + "Desired number of regions per region server. Defaults to 5."; public static int DEFAULT_NUM_REGIONS_PER_SERVER = 5; public static final String OPT_REGION_REPLICATION = "region_replication"; @@ -222,7 +221,7 @@ public class LoadTestTool extends AbstractHBaseTool { private String superUser; private String userNames; - //This file is used to read authentication information in secure clusters. + // This file is used to read authentication information in secure clusters. 
private String authnFileName; private int numRegionsPerServer = DEFAULT_NUM_REGIONS_PER_SERVER; @@ -232,21 +231,19 @@ public class LoadTestTool extends AbstractHBaseTool { private int mobThreshold = -1; // not set // TODO: refactor LoadTestToolImpl somewhere to make the usage from tests less bad, - // console tool itself should only be used from console. + // console tool itself should only be used from console. protected boolean isSkipInit = false; protected boolean isInitOnly = false; protected Cipher cipher = null; - protected String[] splitColonSeparated(String option, - int minNumCols, int maxNumCols) { + protected String[] splitColonSeparated(String option, int minNumCols, int maxNumCols) { String optVal = cmd.getOptionValue(option); String[] cols = optVal.split(COLON); if (cols.length < minNumCols || cols.length > maxNumCols) { - throw new IllegalArgumentException("Expected at least " - + minNumCols + " columns but no more than " + maxNumCols + - " in the colon-separated value '" + optVal + "' of the " + - "-" + option + " option"); + throw new IllegalArgumentException("Expected at least " + minNumCols + + " columns but no more than " + maxNumCols + " in the colon-separated value '" + optVal + + "' of the " + "-" + option + " option"); } return cols; } @@ -260,11 +257,10 @@ public byte[][] getColumnFamilies() { } /** - * Apply column family options such as Bloom filters, compression, and data - * block encoding. + * Apply column family options such as Bloom filters, compression, and data block encoding. */ - protected void applyColumnFamilyOptions(TableName tableName, - byte[][] columnFamilies) throws IOException { + protected void applyColumnFamilyOptions(TableName tableName, byte[][] columnFamilies) + throws IOException { try (Connection conn = ConnectionFactory.createConnection(conf); Admin admin = conn.getAdmin()) { TableDescriptor tableDesc = admin.getDescriptor(tableName); @@ -273,9 +269,9 @@ protected void applyColumnFamilyOptions(TableName tableName, for (byte[] cf : columnFamilies) { ColumnFamilyDescriptor columnDesc = tableDesc.getColumnFamily(cf); boolean isNewCf = columnDesc == null; - ColumnFamilyDescriptorBuilder columnDescBuilder = isNewCf ? - ColumnFamilyDescriptorBuilder.newBuilder(cf) : - ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); + ColumnFamilyDescriptorBuilder columnDescBuilder = + isNewCf ? 
ColumnFamilyDescriptorBuilder.newBuilder(cf) + : ColumnFamilyDescriptorBuilder.newBuilder(columnDesc); if (bloomType != null) { columnDescBuilder.setBloomFilterType(bloomType); } @@ -292,11 +288,8 @@ protected void applyColumnFamilyOptions(TableName tableName, byte[] keyBytes = new byte[cipher.getKeyLength()]; Bytes.secureRandom(keyBytes); columnDescBuilder.setEncryptionType(cipher.getName()); - columnDescBuilder.setEncryptionKey( - EncryptionUtil.wrapKey(conf, - User.getCurrent().getShortName(), - new SecretKeySpec(keyBytes, - cipher.getName()))); + columnDescBuilder.setEncryptionKey(EncryptionUtil.wrapKey(conf, + User.getCurrent().getShortName(), new SecretKeySpec(keyBytes, cipher.getName()))); } if (mobThreshold >= 0) { columnDescBuilder.setMobEnabled(true); @@ -317,8 +310,8 @@ protected void applyColumnFamilyOptions(TableName tableName, @Override protected void addOptions() { addOptNoArg("v", OPT_VERBOSE, "Will display a full readout of logs, including ZooKeeper"); - addOptWithArg(OPT_ZK_QUORUM, "ZK quorum as comma-separated host names " + - "without port numbers"); + addOptWithArg(OPT_ZK_QUORUM, + "ZK quorum as comma-separated host names " + "without port numbers"); addOptWithArg(OPT_ZK_PARENT_NODE, "name of parent znode in zookeeper"); addOptWithArg(OPT_TABLE_NAME, "The name of the table to read or write"); addOptWithArg(OPT_COLUMN_FAMILIES, "The name of the column families to use separated by comma"); @@ -329,20 +322,23 @@ protected void addOptions() { addOptWithArg(OPT_BLOOM, OPT_USAGE_BLOOM); addOptWithArg(OPT_BLOOM_PARAM, "the parameter of bloom filter type"); addOptWithArg(OPT_COMPRESSION, OPT_USAGE_COMPRESSION); - addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); - addOptWithArg(OPT_MAX_READ_ERRORS, "The maximum number of read errors " + - "to tolerate before terminating all reader threads. The default is " + - MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); - addOptWithArg(OPT_MULTIGET, "Whether to use multi-gets as opposed to " + - "separate gets for every column in a row"); - addOptWithArg(OPT_KEY_WINDOW, "The 'key window' to maintain between " + - "reads and writes for concurrent write/read workload. The default " + - "is " + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); - - addOptNoArg(OPT_MULTIPUT, "Whether to use multi-puts as opposed to " + - "separate puts for every column in a row"); - addOptNoArg(OPT_BATCHUPDATE, "Whether to use batch as opposed to " + - "separate updates for every column in a row"); + addOptWithArg(HFileTestUtil.OPT_DATA_BLOCK_ENCODING, + HFileTestUtil.OPT_DATA_BLOCK_ENCODING_USAGE); + addOptWithArg(OPT_MAX_READ_ERRORS, + "The maximum number of read errors " + + "to tolerate before terminating all reader threads. The default is " + + MultiThreadedReader.DEFAULT_MAX_ERRORS + "."); + addOptWithArg(OPT_MULTIGET, + "Whether to use multi-gets as opposed to " + "separate gets for every column in a row"); + addOptWithArg(OPT_KEY_WINDOW, + "The 'key window' to maintain between " + + "reads and writes for concurrent write/read workload. 
The default " + "is " + + MultiThreadedReader.DEFAULT_KEY_WINDOW + "."); + + addOptNoArg(OPT_MULTIPUT, + "Whether to use multi-puts as opposed to " + "separate puts for every column in a row"); + addOptNoArg(OPT_BATCHUPDATE, + "Whether to use batch as opposed to " + "separate updates for every column in a row"); addOptNoArg(OPT_INMEMORY, OPT_USAGE_IN_MEMORY); addOptWithArg(OPT_GENERATOR, OPT_GENERATOR_USAGE); addOptWithArg(OPT_WRITER, OPT_WRITER_USAGE); @@ -350,11 +346,9 @@ protected void addOptions() { addOptWithArg(OPT_READER, OPT_READER_USAGE); addOptWithArg(OPT_NUM_KEYS, "The number of keys to read/write"); - addOptWithArg(OPT_START_KEY, "The first key to read/write " + - "(a 0-based index). The default value is " + - DEFAULT_START_KEY + "."); - addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " - + "already exists"); + addOptWithArg(OPT_START_KEY, "The first key to read/write " + + "(a 0-based index). The default value is " + DEFAULT_START_KEY + "."); + addOptNoArg(OPT_SKIP_INIT, "Skip the initialization; assume test table " + "already exists"); addOptWithArg(NUM_TABLES, "A positive integer number. When a number n is specified, load test " @@ -379,9 +373,8 @@ public CommandLine parse(Options opts, String[] args, Properties props, boolean throws ParseException { CommandLine cl = super.parse(opts, args, props, stop); - boolean isReadWriteUpdate = cmd.hasOption(OPT_READ) - || cmd.hasOption(OPT_WRITE) - || cmd.hasOption(OPT_UPDATE); + boolean isReadWriteUpdate = + cmd.hasOption(OPT_READ) || cmd.hasOption(OPT_WRITE) || cmd.hasOption(OPT_UPDATE); boolean isInitOnly = cmd.hasOption(OPT_INIT_ONLY); if (!isInitOnly && !isReadWriteUpdate) { @@ -407,8 +400,7 @@ public CommandLine parse(Options opts, String[] args, Properties props, boolean protected void processOptions(CommandLine cmd) { this.cmd = cmd; - tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, - DEFAULT_TABLE_NAME)); + tableName = TableName.valueOf(cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME)); if (cmd.hasOption(OPT_COLUMN_FAMILIES)) { String[] list = cmd.getOptionValue(OPT_COLUMN_FAMILIES).split(","); @@ -428,10 +420,9 @@ protected void processOptions(CommandLine cmd) { deferredLogFlush = cmd.hasOption(OPT_DEFERRED_LOG_FLUSH); if (!isInitOnly) { - startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, - String.valueOf(DEFAULT_START_KEY)), 0, Long.MAX_VALUE); - long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, - Long.MAX_VALUE - startKey); + startKey = parseLong(cmd.getOptionValue(OPT_START_KEY, String.valueOf(DEFAULT_START_KEY)), 0, + Long.MAX_VALUE); + long numKeys = parseLong(cmd.getOptionValue(OPT_NUM_KEYS), 1, Long.MAX_VALUE - startKey); endKey = startKey + numKeys; isSkipInit = cmd.hasOption(OPT_SKIP_INIT); System.out.println("Key range: [" + startKey + ".." + (endKey - 1) + "]"); @@ -445,8 +436,7 @@ protected void processOptions(CommandLine cmd) { int colIndex = 0; minColsPerKey = 1; maxColsPerKey = 2 * Integer.parseInt(writeOpts[colIndex++]); - int avgColDataSize = - parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); + int avgColDataSize = parseInt(writeOpts[colIndex++], 1, Integer.MAX_VALUE); minColDataSize = avgColDataSize / 2; maxColDataSize = avgColDataSize * 3 / 2; @@ -462,10 +452,8 @@ protected void processOptions(CommandLine cmd) { } System.out.println("Multi-puts: " + isMultiPut); - System.out.println("Columns per key: " + minColsPerKey + ".." - + maxColsPerKey); - System.out.println("Data size per column: " + minColDataSize + ".." 
- + maxColDataSize); + System.out.println("Columns per key: " + minColsPerKey + ".." + maxColsPerKey); + System.out.println("Data size per column: " + minColDataSize + ".." + maxColDataSize); } if (isUpdate) { @@ -496,18 +484,15 @@ protected void processOptions(CommandLine cmd) { } if (cmd.hasOption(OPT_MAX_READ_ERRORS)) { - maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), - 0, Integer.MAX_VALUE); + maxReadErrors = parseInt(cmd.getOptionValue(OPT_MAX_READ_ERRORS), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_KEY_WINDOW)) { - keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), - 0, Integer.MAX_VALUE); + keyWindow = parseInt(cmd.getOptionValue(OPT_KEY_WINDOW), 0, Integer.MAX_VALUE); } if (cmd.hasOption(OPT_MULTIGET)) { - multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), - 0, Integer.MAX_VALUE); + multiGetBatchSize = parseInt(cmd.getOptionValue(OPT_MULTIGET), 0, Integer.MAX_VALUE); } System.out.println("Multi-gets (value of 1 means no multigets): " + multiGetBatchSize); @@ -538,16 +523,15 @@ protected void processOptions(CommandLine cmd) { private void parseColumnFamilyOptions(CommandLine cmd) { String dataBlockEncodingStr = cmd.getOptionValue(HFileTestUtil.OPT_DATA_BLOCK_ENCODING); - dataBlockEncodingAlgo = dataBlockEncodingStr == null ? null : - DataBlockEncoding.valueOf(dataBlockEncodingStr); + dataBlockEncodingAlgo = + dataBlockEncodingStr == null ? null : DataBlockEncoding.valueOf(dataBlockEncodingStr); String compressStr = cmd.getOptionValue(OPT_COMPRESSION); - compressAlgo = compressStr == null ? Compression.Algorithm.NONE : - Compression.Algorithm.valueOf(compressStr); + compressAlgo = compressStr == null ? Compression.Algorithm.NONE + : Compression.Algorithm.valueOf(compressStr); String bloomStr = cmd.getOptionValue(OPT_BLOOM); - bloomType = bloomStr == null ? BloomType.ROW : - BloomType.valueOf(bloomStr); + bloomType = bloomStr == null ? BloomType.ROW : BloomType.valueOf(bloomStr); if (bloomType == BloomType.ROWPREFIX_FIXED_LENGTH) { if (!cmd.hasOption(OPT_BLOOM_PARAM)) { @@ -570,9 +554,8 @@ public void initTestTable() throws IOException { durability = Durability.ASYNC_WAL; } - HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName, - getColumnFamilies(), compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, - regionReplication, durability); + HBaseTestingUtility.createPreSplitLoadTestTable(conf, tableName, getColumnFamilies(), + compressAlgo, dataBlockEncodingAlgo, numRegionsPerServer, regionReplication, durability); applyColumnFamilyOptions(tableName, getColumnFamilies()); } @@ -634,8 +617,8 @@ protected int loadTable() throws IOException { userOwner = User.createUserForTesting(conf, superUser, new String[0]); } } else { - args = clazzAndArgs.length == 1 ? new String[0] : Arrays.copyOfRange(clazzAndArgs, 1, - clazzAndArgs.length); + args = clazzAndArgs.length == 1 ? 
new String[0] + : Arrays.copyOfRange(clazzAndArgs, 1, clazzAndArgs.length); } dataGen.initialize(args); } else { @@ -646,15 +629,14 @@ protected int loadTable() throws IOException { if (userOwner != null) { LOG.info("Granting permissions for user " + userOwner.getShortName()); - Permission.Action[] actions = { - Permission.Action.ADMIN, Permission.Action.CREATE, - Permission.Action.READ, Permission.Action.WRITE }; + Permission.Action[] actions = { Permission.Action.ADMIN, Permission.Action.CREATE, + Permission.Action.READ, Permission.Action.WRITE }; try { - AccessControlClient.grant(ConnectionFactory.createConnection(conf), - tableName, userOwner.getShortName(), null, null, actions); + AccessControlClient.grant(ConnectionFactory.createConnection(conf), tableName, + userOwner.getShortName(), null, null, actions); } catch (Throwable e) { - LOG.error(HBaseMarkers.FATAL, "Error in granting permission for the user " + - userOwner.getShortName(), e); + LOG.error(HBaseMarkers.FATAL, + "Error in granting permission for the user " + userOwner.getShortName(), e); return EXIT_FAILURE; } } @@ -707,8 +689,8 @@ protected int loadTable() throws IOException { if (isRead) { if (userOwner != null) { - readerThreads = new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, - userNames); + readerThreads = + new MultiThreadedReaderWithACL(dataGen, conf, tableName, verifyPercent, userNames); } else { String readerClass = null; if (cmd.hasOption(OPT_READER)) { @@ -725,14 +707,12 @@ protected int loadTable() throws IOException { } if (isUpdate && isWrite) { - LOG.info("Concurrent write/update workload: making updaters aware of the " + - "write point"); + LOG.info("Concurrent write/update workload: making updaters aware of the " + "write point"); updaterThreads.linkToWriter(writerThreads); } if (isRead && (isUpdate || isWrite)) { - LOG.info("Concurrent write/read workload: making readers aware of the " + - "write point"); + LOG.info("Concurrent write/read workload: making readers aware of the " + "write point"); readerThreads.linkToWriter(isUpdate ? 
updaterThreads : writerThreads); } @@ -783,46 +763,46 @@ protected int loadTable() throws IOException { private LoadTestDataGenerator getLoadGeneratorInstance(String clazzName) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor(int.class, int.class, int.class, int.class, - byte[][].class); + Constructor constructor = + clazz.getConstructor(int.class, int.class, int.class, int.class, byte[][].class); return (LoadTestDataGenerator) constructor.newInstance(minColDataSize, maxColDataSize, - minColsPerKey, maxColsPerKey, families); + minColsPerKey, maxColsPerKey, families); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedWriter getMultiThreadedWriterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class); + Constructor constructor = + clazz.getConstructor(LoadTestDataGenerator.class, Configuration.class, TableName.class); return (MultiThreadedWriter) constructor.newInstance(dataGen, conf, tableName); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedUpdater getMultiThreadedUpdaterInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); - return (MultiThreadedUpdater) constructor.newInstance( - dataGen, conf, tableName, updatePercent); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); + return (MultiThreadedUpdater) constructor.newInstance(dataGen, conf, tableName, + updatePercent); } catch (Exception e) { throw new IOException(e); } } - private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName - , LoadTestDataGenerator dataGen) throws IOException { + private MultiThreadedReader getMultiThreadedReaderInstance(String clazzName, + LoadTestDataGenerator dataGen) throws IOException { try { Class clazz = Class.forName(clazzName); - Constructor constructor = clazz.getConstructor( - LoadTestDataGenerator.class, Configuration.class, TableName.class, double.class); + Constructor constructor = clazz.getConstructor(LoadTestDataGenerator.class, + Configuration.class, TableName.class, double.class); return (MultiThreadedReader) constructor.newInstance(dataGen, conf, tableName, verifyPercent); } catch (Exception e) { throw new IOException(e); @@ -834,15 +814,12 @@ public static void main(String[] args) { } /** - * When NUM_TABLES is specified, the function starts multiple worker threads - * which individually start a LoadTestTool instance to load a table. Each - * table name is in format <tn>_<index>. For example, "-tn test -num_tables 2" - * , table names will be "test_1", "test_2" - * + * When NUM_TABLES is specified, the function starts multiple worker threads which individually + * start a LoadTestTool instance to load a table. Each table name is in format <tn>_<index>. 
+ * For example, "-tn test -num_tables 2" , table names will be "test_1", "test_2" * @throws IOException if one of the load tasks is unable to complete */ - private int parallelLoadTables() - throws IOException { + private int parallelLoadTables() throws IOException { // create new command args String tableName = cmd.getOptionValue(OPT_TABLE_NAME, DEFAULT_TABLE_NAME); String[] newArgs = null; @@ -869,7 +846,7 @@ private int parallelLoadTables() List workers = new ArrayList<>(); for (int i = 0; i < numTables; i++) { String[] workerArgs = newArgs.clone(); - workerArgs[tableNameValueIndex] = tableName + "_" + (i+1); + workerArgs[tableNameValueIndex] = tableName + "_" + (i + 1); WorkerThread worker = new WorkerThread(i, workerArgs); workers.add(worker); LOG.info(worker + " starting"); diff --git a/hbase-metrics-api/pom.xml b/hbase-metrics-api/pom.xml index 7865b2371368..ff086a6b87e1 100644 --- a/hbase-metrics-api/pom.xml +++ b/hbase-metrics-api/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics API HBase Metrics API descriptions - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - @@ -149,6 +105,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java index 6e041590ee3a..78d9ade04236 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Counter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Counter extends Metric { /** * Increment {@code this} by {@code n}. - * * @param n The amount to increment. */ void increment(long n); @@ -47,7 +46,6 @@ public interface Counter extends Metric { /** * Decrement {@code this} by {@code n}. - * * @param n The amount to decrement. */ void decrement(long n); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java index ba171c2cab24..b20da2426296 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Gauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ /** * A metrics which measures a discrete value. - * * @param The value of the Gauge. 
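The Gauge javadoc above is deliberately terse. A gauge simply reports the current value of something each time it is read; the sketch below shows one plausible way to wire one up, assuming Gauge is satisfied by supplying its single value accessor and using the MetricRegistry#registerAll(MetricSet) hook referenced by the MetricSet javadoc later in this patch (the class and metric names here are invented for illustration).

import java.util.Collections;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.hbase.metrics.Gauge;
import org.apache.hadoop.hbase.metrics.Metric;
import org.apache.hadoop.hbase.metrics.MetricRegistry;
import org.apache.hadoop.hbase.metrics.MetricSet;

public class CacheMetricsSketch implements MetricSet {
  private final AtomicLong cachedBlocks = new AtomicLong();

  @Override
  public Map<String, Metric> getMetrics() {
    // The gauge re-reads cachedBlocks every time the metrics system samples it.
    Gauge<Long> size = cachedBlocks::get;
    return Collections.singletonMap("cachedBlocks", size);
  }

  void registerWith(MetricRegistry registry) {
    registry.registerAll(this); // bulk-register every metric in this set
  }
}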
*/ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java index 891bc6df2ea1..da4ff89c59b7 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Histogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,14 +30,12 @@ public interface Histogram extends Metric { /** * Adds a new value to the distribution. - * * @param value The value to add */ void update(int value); /** * Adds a new value to the distribution. - * * @param value The value to add */ void update(long value); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java index 5f38a005b3e1..9217a2af4a4e 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Meter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,6 @@ public interface Meter extends Metric { /** * Records {@code events} occurrences. - * * @param events Number of occurrences to record. */ void mark(long events); @@ -53,14 +52,13 @@ public interface Meter extends Metric { double getMeanRate(); /** - * Returns the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created. + * Returns the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created. *
<p>
    * This rate has the same exponential decay factor as the one-minute load average in the {@code * top} Unix command. - * - * @return the one-minute exponentially-weighted moving average rate at which events have - * occurred since the meter was created + * @return the one-minute exponentially-weighted moving average rate at which events have occurred + * since the meter was created */ double getOneMinuteRate(); @@ -70,7 +68,6 @@ public interface Meter extends Metric { *
<p>
    * This rate has the same exponential decay factor as the five-minute load average in the {@code * top} Unix command. - * * @return the five-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ @@ -82,7 +79,6 @@ public interface Meter extends Metric { *
<p>
    * This rate has the same exponential decay factor as the fifteen-minute load average in the * {@code top} Unix command. - * * @return the fifteen-minute exponentially-weighted moving average rate at which events have * occurred since the meter was created */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java index 2f6d49e01fc9..e79a9f3631ac 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Metric.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java index 33e989cfe015..9e7b13d89c8b 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistries.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Collection; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -53,8 +50,8 @@ public static MetricRegistries global() { public abstract void clear(); /** - * Create or return MetricRegistry with the given info. MetricRegistry will only be created - * if current reference count is 0. Otherwise ref counted is incremented, and an existing instance + * Create or return MetricRegistry with the given info. MetricRegistry will only be created if + * current reference count is 0. Otherwise ref counted is incremented, and an existing instance * will be returned. * @param info the info object for the MetricRegistrytry. * @return created or existing MetricRegistry. diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java index edc813d95b99..737ab0e5abcb 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistriesLoader.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.ArrayList; import java.util.List; import java.util.ServiceLoader; - import org.apache.hadoop.hbase.util.ReflectionUtils; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; @@ -32,8 +29,8 @@ public final class MetricRegistriesLoader { private static final Logger LOG = LoggerFactory.getLogger(MetricRegistries.class); - private static final String defaultClass - = "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; + private static final String defaultClass = + "org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl"; private MetricRegistriesLoader() { } @@ -64,7 +61,7 @@ static MetricRegistries load(List availableImplementations) { return impl; } else if (availableImplementations.isEmpty()) { try { - return ReflectionUtils.newInstance((Class)Class.forName(defaultClass)); + return ReflectionUtils.newInstance((Class) Class.forName(defaultClass)); } catch (ClassNotFoundException e) { throw new RuntimeException(e); } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java index 3bd5f6cd844c..78179ebcee95 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistry.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.Optional; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -32,7 +31,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Timer} used to measure durations and report rates. - * * @param name the name of the timer. * @return An instance of {@link Timer}. */ @@ -40,7 +38,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Histogram} used to measure a distribution of values. - * * @param name The name of the Histogram. * @return An instance of {@link Histogram}. */ @@ -49,7 +46,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Meter} used to measure durations and report distributions (a * combination of a {@link Timer} and a {@link Histogram}. - * * @param name The name of the Meter. * @return An instance of {@link Meter}. */ @@ -57,7 +53,6 @@ public interface MetricRegistry extends MetricSet { /** * Get or construct a {@link Counter} used to track a mutable number. - * * @param name The name of the Counter * @return An instance of {@link Counter}. 
*/ @@ -96,7 +91,6 @@ public interface MetricRegistry extends MetricSet { /** * Removes the metric with the given name. - * * @param name the name of the metric * @return true if the metric is removed. */ diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java index be77c42985de..9d53a8cbf539 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryFactory.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java index c4396bd24d5a..abac13b8ace5 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricRegistryInfo.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; - import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.yetus.audience.InterfaceAudience; /** * HBase Metrics are grouped in different MetricRegistry'ies. All metrics that correspond to a - * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. - * This class holds the name and description and JMX related context names for such group of - * metrics. + * subcomponent (like RPC, GC, WAL) are managed in a single MetricRegistry. This class holds the + * name and description and JMX related context names for such group of metrics. */ @InterfaceAudience.Private public class MetricRegistryInfo { @@ -37,12 +34,8 @@ public class MetricRegistryInfo { protected final String metricsJmxContext; protected final boolean existingSource; - public MetricRegistryInfo( - String metricsName, - String metricsDescription, - String metricsJmxContext, - String metricsContext, - boolean existingSource) { + public MetricRegistryInfo(String metricsName, String metricsDescription, String metricsJmxContext, + String metricsContext, boolean existingSource) { this.metricsName = metricsName; this.metricsDescription = metricsDescription; this.metricsContext = metricsContext; @@ -51,9 +44,8 @@ public MetricRegistryInfo( } /** - * Get the metrics context. 
For hadoop metrics2 system this is usually an all lowercased string. + * Get the metrics context. For hadoop metrics2 system this is usually an all lowercased string. * eg. regionserver, master, thriftserver - * * @return The string context used to register this source to hadoop's metrics2 system. */ public String getMetricsContext() { @@ -68,16 +60,15 @@ public String getMetricsDescription() { } /** - * Get the name of the context in JMX that this source will be exposed through. - * This is in ObjectName format. With the default context being Hadoop -> HBase + * Get the name of the context in JMX that this source will be exposed through. This is in + * ObjectName format. With the default context being Hadoop -> HBase */ public String getMetricsJmxContext() { return metricsJmxContext; } /** - * Get the name of the metrics that are being exported by this source. - * Eg. IPC, GC, WAL + * Get the name of the metrics that are being exported by this source. Eg. IPC, GC, WAL */ public String getMetricsName() { return metricsName; @@ -102,11 +93,7 @@ public boolean equals(Object obj) { @Override public int hashCode() { - return new HashCodeBuilder() - .append(metricsName) - .append(metricsDescription) - .append(metricsContext) - .append(metricsJmxContext) - .toHashCode(); + return new HashCodeBuilder().append(metricsName).append(metricsDescription) + .append(metricsContext).append(metricsJmxContext).toHashCode(); } } diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java index 5e1c873ce8bd..60d7e9e39ddb 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/MetricSet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.metrics; import java.util.Map; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** * A set of named metrics. - * * @see MetricRegistry#registerAll(MetricSet) */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.COPROC) @@ -34,7 +31,6 @@ public interface MetricSet extends Metric { /** * A map of metric names to metrics. - * * @return the metrics */ Map getMetrics(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java index ecb01ad57c0e..e38302360696 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/PackageMarker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,19 +19,16 @@ import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; - import org.apache.yetus.audience.InterfaceAudience; /** - * This is a dummy annotation that forces javac to produce output for - * otherwise empty package-info.java. - * - *
<p>
    The result is maven-compiler-plugin can properly identify the scope of - * changed files - * - *
<p>
    See more details in - * - * maven-compiler-plugin: incremental compilation broken + * This is a dummy annotation that forces javac to produce output for otherwise empty + * package-info.java. + *
<p>
    + * The result is maven-compiler-plugin can properly identify the scope of changed files + *
<p>
    + * See more details in + * maven-compiler-plugin: incremental compilation broken */ @Retention(RetentionPolicy.SOURCE) @InterfaceAudience.Private diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java index a7b9869a0d2d..26aee2804eea 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Snapshot.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,8 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - - package org.apache.hadoop.hbase.metrics; import org.apache.yetus.audience.InterfaceAudience; @@ -43,7 +40,6 @@ public interface Snapshot { /** * Returns the number of values in the snapshot. - * * @return the number of values */ long getCount(); @@ -57,77 +53,66 @@ public interface Snapshot { /** * Returns the value at the 25th percentile in the distribution. - * * @return the value at the 25th percentile */ long get25thPercentile(); /** * Returns the value at the 75th percentile in the distribution. - * * @return the value at the 75th percentile */ long get75thPercentile(); /** * Returns the value at the 90th percentile in the distribution. - * * @return the value at the 90th percentile */ long get90thPercentile(); /** * Returns the value at the 95th percentile in the distribution. - * * @return the value at the 95th percentile */ long get95thPercentile(); /** * Returns the value at the 98th percentile in the distribution. - * * @return the value at the 98th percentile */ long get98thPercentile(); /** * Returns the value at the 99th percentile in the distribution. - * * @return the value at the 99th percentile */ long get99thPercentile(); /** * Returns the value at the 99.9th percentile in the distribution. - * * @return the value at the 99.9th percentile */ long get999thPercentile(); /** * Returns the median value in the distribution. - * * @return the median value */ long getMedian(); /** * Returns the highest value in the snapshot. - * * @return the highest value */ long getMax(); /** * Returns the arithmetic mean of the values in the snapshot. - * * @return the arithmetic mean */ long getMean(); /** * Returns the lowest value in the snapshot. - * * @return the lowest value */ long getMin(); diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java index 30c64fb5ce4b..aeb6adf5163c 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/Timer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; diff --git a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java index e79451fe6ad4..3acaa96d871d 100644 --- a/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java +++ b/hbase-metrics-api/src/main/java/org/apache/hadoop/hbase/metrics/package-info.java @@ -1,19 +1,12 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java index 59f26999bd2c..85fff81fc9be 100644 --- a/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java +++ b/hbase-metrics-api/src/test/java/org/apache/hadoop/hbase/metrics/TestMetricRegistriesLoader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -51,8 +51,8 @@ public void testLoadMultipleInstances() { MetricRegistries loader1 = mock(MetricRegistries.class); MetricRegistries loader2 = mock(MetricRegistries.class); MetricRegistries loader3 = mock(MetricRegistries.class); - MetricRegistries instance = MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, - loader3)); + MetricRegistries instance = + MetricRegistriesLoader.load(Lists.newArrayList(loader1, loader2, loader3)); // the load() returns the first instance assertEquals(loader1, instance); diff --git a/hbase-metrics/pom.xml b/hbase-metrics/pom.xml index ee0ec004b94f..96821b2693aa 100644 --- a/hbase-metrics/pom.xml +++ b/hbase-metrics/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,50 +31,6 @@ Apache HBase - Metrics Implementation HBase Metrics Implementation - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - maven-surefire-plugin - - - - secondPartTestsExecution - test - - test - - - true - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -157,6 +113,50 @@ + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + maven-surefire-plugin + + + + secondPartTestsExecution + + test + + test + + true + + + + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + + diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java index 8021c0689398..ad30fbe1674d 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/CounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.atomic.LongAdder; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java index b5c52cf840cc..d2723a22e942 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/DropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
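The CounterImpl hunk above only trims an import, but it is a reminder that the counter is built on java.util.concurrent.atomic.LongAdder. A minimal JDK-only sketch of that pattern (the class below is illustrative, not the patched CounterImpl):

import java.util.concurrent.atomic.LongAdder;

public class SimpleCounter {
  private final LongAdder counter = new LongAdder();

  public void increment() {
    counter.increment();
  }

  public void increment(long n) {
    counter.add(n);
  }

  public long getCount() {
    // LongAdder keeps per-thread cells, so the total is only materialized on read.
    return counter.sum();
  }
}

LongAdder trades a slightly more expensive read for contention-free increments, which suits a metric that is written far more often than it is read.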
See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,7 @@ package org.apache.hadoop.hbase.metrics.impl; import com.codahale.metrics.Meter; - import java.util.Objects; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -39,11 +37,13 @@ public DropwizardMeter(Meter meter) { this.meter = Objects.requireNonNull(meter); } - @Override public void mark() { + @Override + public void mark() { this.meter.mark(); } - @Override public void mark(long count) { + @Override + public void mark(long count) { this.meter.mark(count); } diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java index 81544607f5f7..f9cc1a6ca2f6 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/FastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ public class FastLongHistogram { public static final int DEFAULT_NBINS = 255; public static final double[] DEFAULT_QUANTILES = - new double[]{0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999}; + new double[] { 0.25, 0.5, 0.75, 0.90, 0.95, 0.98, 0.99, 0.999 }; /** * Bins is a class containing a list of buckets(or bins) for estimation histogram of some data. @@ -105,8 +105,8 @@ private int getIndex(long value) { return this.counts.length - 2; } // compute the position - return 1 + (int) ((value - this.binsMin) * (this.counts.length - 3) / - (this.binsMax - this.binsMin)); + return 1 + (int) ((value - this.binsMin) * (this.counts.length - 3) + / (this.binsMax - this.binsMin)); } diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java index 2e0aa55808f6..c29b267e347a 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/HistogramImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,9 +22,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are - * slow compared to this implementation, so we are using our implementation here. - * See HBASE-15222. + * Custom histogram implementation based on FastLongHistogram. Dropwizard-based histograms are slow + * compared to this implementation, so we are using our implementation here. See HBASE-15222. 
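The FastLongHistogram hunk above merely reflows the bucket-index expression, but the arithmetic is worth seeing on its own: values inside [min, max) map linearly onto the interior buckets, while the first and last buckets catch under- and overflow. The sketch below restates that expression with plain longs; the "+ 3" sizing of the counts array is an assumption made for illustration.

public class BucketIndexDemo {
  // Mirrors the reflowed expression: 1 + (value - min) * (nCounts - 3) / (max - min).
  static int interiorIndex(long value, long min, long max, int nCounts) {
    return 1 + (int) ((value - min) * (nCounts - 3) / (max - min));
  }

  public static void main(String[] args) {
    long min = 0, max = 1000;
    int nCounts = 255 + 3; // DEFAULT_NBINS plus the special buckets, assumed for the demo
    for (long v : new long[] { 1, 250, 500, 999 }) {
      System.out.println(v + " -> bucket " + interiorIndex(v, min, max, nCounts));
    }
  }
}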
*/ @InterfaceAudience.Private public class HistogramImpl implements Histogram { diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java index 3826e66093b6..39da41eeec00 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistriesImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -22,7 +21,6 @@ import java.util.Collections; import java.util.Optional; import java.util.Set; - import org.apache.hadoop.hbase.metrics.MetricRegistries; import org.apache.hadoop.hbase.metrics.MetricRegistry; import org.apache.hadoop.hbase.metrics.MetricRegistryFactory; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java index 6f9e16366aa7..5ebdf0d479a0 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryFactoryImpl.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java index 03c669fee0e7..0ecd707b4813 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/MetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; - import org.apache.hadoop.hbase.metrics.Counter; import org.apache.hadoop.hbase.metrics.Gauge; import org.apache.hadoop.hbase.metrics.Histogram; @@ -97,7 +96,7 @@ public Metric register(String name, Metric metric) { @Override public Gauge register(String name, Gauge gauge) { - return (Gauge) register(name, (Metric)gauge); + return (Gauge) register(name, (Metric) gauge); } @Override diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java index 63131a100e92..19ec192211a8 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/RefCountingMap.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -23,22 +22,23 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.function.Supplier; import java.util.stream.Collectors; - import org.apache.yetus.audience.InterfaceAudience; /** - * A map of K to V, but does ref counting for added and removed values. The values are - * not added directly, but instead requested from the given Supplier if ref count == 0. Each put() - * call will increment the ref count, and each remove() will decrement it. The values are removed - * from the map iff ref count == 0. + * A map of K to V, but does ref counting for added and removed values. The values are not added + * directly, but instead requested from the given Supplier if ref count == 0. Each put() call will + * increment the ref count, and each remove() will decrement it. The values are removed from the map + * iff ref count == 0. */ @InterfaceAudience.Private class RefCountingMap { private ConcurrentHashMap> map = new ConcurrentHashMap<>(); + private static class Payload { V v; int refCount; + Payload(V v) { this.v = v; this.refCount = 1; // create with ref count = 1 @@ -46,7 +46,7 @@ private static class Payload { } V put(K k, Supplier supplier) { - return ((Payload)map.compute(k, (k1, oldValue) -> { + return ((Payload) map.compute(k, (k1, oldValue) -> { if (oldValue != null) { oldValue.refCount++; return oldValue; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java index 03a8c65915e2..3ad560a3d74f 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/TimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
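The reflowed RefCountingMap javadoc above spells out the contract: the supplier is consulted only when no live reference exists, every put() bumps the count, every remove() drops it, and the entry disappears once the count reaches zero. A compact, self-contained restatement of that pattern (the names RefCounting, acquire and release are illustrative, not the patched API):

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

class RefCounting<K, V> {
  private static final class Ref<T> {
    final T value;
    int count = 1;

    Ref(T value) {
      this.value = value;
    }
  }

  private final ConcurrentHashMap<K, Ref<V>> map = new ConcurrentHashMap<>();

  /** Creates the value via the supplier on first use, otherwise bumps the ref count. */
  V acquire(K key, Supplier<V> supplier) {
    return map.compute(key, (k, ref) -> {
      if (ref != null) {
        ref.count++;
        return ref;
      }
      return new Ref<>(supplier.get());
    }).value;
  }

  /** Drops one reference; the entry is removed once the last reference is released. */
  V release(K key) {
    Ref<V> ref = map.computeIfPresent(key, (k, r) -> --r.count <= 0 ? null : r);
    return ref == null ? null : ref.value;
  }
}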
See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.metrics.impl; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.metrics.Timer; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java index 0df119e2ee45..91a3af5b9cb9 100644 --- a/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java +++ b/hbase-metrics/src/main/java/org/apache/hadoop/hbase/metrics/impl/package-info.java @@ -1,19 +1,12 @@ /** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license + * agreements. See the NOTICE file distributed with this work for additional information regarding + * copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable + * law or agreed to in writing, software distributed under the License is distributed on an "AS IS" + * BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License + * for the specific language governing permissions and limitations under the License. */ /** diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java index 5b5e26f13a80..651f560c5c7b 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestCounterImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -39,11 +39,13 @@ public class TestCounterImpl { private Counter counter; - @Before public void setup() { + @Before + public void setup() { this.counter = new CounterImpl(); } - @Test public void testCounting() { + @Test + public void testCounting() { counter.increment(); assertEquals(1L, counter.getCount()); counter.increment(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java index 072f18a3b155..9f5415c67e37 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestDropwizardMeter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,11 +38,13 @@ public class TestDropwizardMeter { private Meter meter; - @Before public void setup() { + @Before + public void setup() { this.meter = Mockito.mock(Meter.class); } - @Test public void test() { + @Test + public void test() { DropwizardMeter dwMeter = new DropwizardMeter(this.meter); dwMeter.mark(); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java index 120f91169c5a..78c2afc1ec92 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestFastLongHistogram.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; @@ -34,7 +33,7 @@ /** * Testcases for FastLongHistogram. */ -@Category({MiscTests.class, SmallTests.class}) +@Category({ MiscTests.class, SmallTests.class }) public class TestFastLongHistogram { @ClassRule @@ -97,7 +96,6 @@ public void testAdaptionOfChange() { } } - @Test public void testGetNumAtOrBelow() { long[] VALUES = { 1, 10, 20, 30, 40, 50 }; @@ -126,7 +124,6 @@ public void testGetNumAtOrBelow() { assertEquals(601, h.getNumAtOrBelow(Long.MAX_VALUE)); } - @Test public void testSameValues() { FastLongHistogram hist = new FastLongHistogram(100); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java index 52d29fc700cc..e1ed9cf6a5be 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestGauge.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
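The TestDropwizardMeter hunk above already wraps a Mockito mock; a variation that also asserts the delegation explicitly could look like the sketch below. The test name and its placement in the implementation package are assumptions; DropwizardMeter and com.codahale.metrics.Meter are the classes from the patch.

// Assumed to live alongside the class under test so package visibility is not a concern.
package org.apache.hadoop.hbase.metrics.impl;

import com.codahale.metrics.Meter;
import org.junit.Test;
import org.mockito.Mockito;

public class MeterDelegationSketch {
  @Test
  public void marksAreForwarded() {
    Meter backing = Mockito.mock(Meter.class);
    DropwizardMeter dwMeter = new DropwizardMeter(backing);

    dwMeter.mark();    // single-unit mark
    dwMeter.mark(10L); // bulk mark

    Mockito.verify(backing).mark();
    Mockito.verify(backing).mark(10L);
  }
}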
See the NOTICE file * distributed with this work for additional information @@ -34,18 +34,17 @@ public class TestGauge { @ClassRule - public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestGauge.class); + public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule.forClass(TestGauge.class); @Test public void testGetValue() { SimpleGauge gauge = new SimpleGauge(); - assertEquals(0, (long)gauge.getValue()); + assertEquals(0, (long) gauge.getValue()); gauge.setValue(1000L); - assertEquals(1000L, (long)gauge.getValue()); + assertEquals(1000L, (long) gauge.getValue()); } /** @@ -55,7 +54,8 @@ private static class SimpleGauge implements Gauge { private final AtomicLong value = new AtomicLong(0L); - @Override public Long getValue() { + @Override + public Long getValue() { return this.value.get(); } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java index 9be3fcee20f4..cbce953b69ba 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestHistogramImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java index 1115529a051c..43db251b8bfb 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestMetricRegistryImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public void testCounter() { counter.increment(42L); Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -72,7 +72,7 @@ public Long getValue() { }); Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -81,7 +81,7 @@ public void testRegisterGaugeLambda() { registry.register("gaugeLambda", () -> 42L); Optional metric = registry.get("gaugeLambda"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); } @Test @@ -106,7 +106,7 @@ public void testRegister() { Optional metric = registry.get("mycounter"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Counter)metric.get()).getCount()); + assertEquals(42L, (long) ((Counter) metric.get()).getCount()); } @Test @@ -119,8 +119,7 @@ public void testDoubleRegister() { Optional metric = registry.get("mygauge"); assertTrue(metric.isPresent()); - assertEquals(42L, (long)((Gauge)metric.get()).getValue()); - + assertEquals(42L, (long) ((Gauge) metric.get()).getValue()); Counter c1 = registry.counter("mycounter"); Counter c2 = registry.counter("mycounter"); diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java index c5ed1edb9eb8..6478639cf509 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestRefCountingMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -59,7 +59,7 @@ public void testPutGet() { @Test public void testPutMulti() { String v1 = map.put("foo", () -> "foovalue"); - String v2 = map.put("foo", () -> "foovalue2"); + String v2 = map.put("foo", () -> "foovalue2"); String v3 = map.put("foo", () -> "foovalue3"); String v = map.get("foo"); @@ -127,7 +127,6 @@ public void testClear() { assertEquals(0, map.size()); } - @Test public void testKeySet() { map.put("foo", () -> "foovalue"); @@ -151,6 +150,6 @@ public void testValues() { assertEquals(3, values.size()); Lists.newArrayList("foovalue", "foovalue3", "foovalue4").stream() - .forEach(v -> assertTrue(values.contains(v))); + .forEach(v -> assertTrue(values.contains(v))); } } diff --git a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java index d9d3632b7310..749bd63c7e5a 100644 --- a/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java +++ b/hbase-metrics/src/test/java/org/apache/hadoop/hbase/metrics/impl/TestTimerImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
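Taken together, the TestMetricRegistryImpl hunks above exercise the registry surface end to end: create-on-first-use counters, lambda gauges, and Optional-based lookup. The same calls, condensed into one illustrative helper (the class and method names are not from the patch):

import java.util.Optional;
import org.apache.hadoop.hbase.metrics.Counter;
import org.apache.hadoop.hbase.metrics.Metric;
import org.apache.hadoop.hbase.metrics.MetricRegistry;

public final class RegistryUsageSketch {
  private RegistryUsageSketch() {
  }

  static long recordAndRead(MetricRegistry registry) {
    Counter counter = registry.counter("mycounter"); // created on first use
    counter.increment(42L);
    registry.register("gaugeLambda", () -> 42L);     // lambda registered as a Gauge, as in the test
    Optional<Metric> metric = registry.get("mycounter");
    return metric.map(m -> ((Counter) m).getCount()).orElse(-1L);
  }
}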
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml index f04329ddc216..257970c35a79 100644 --- a/hbase-procedure/pom.xml +++ b/hbase-procedure/pom.xml @@ -1,6 +1,6 @@ - + - 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -30,35 +30,6 @@ hbase-procedure Apache HBase - Procedure Procedure Framework - - - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-assembly-plugin - - true - - - - net.revelc.code - warbucks-maven-plugin - - - org.apache.maven.plugins - maven-checkstyle-plugin - - true - - - - @@ -145,10 +116,39 @@ test + + + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-assembly-plugin + + true + + + + net.revelc.code + warbucks-maven-plugin + + + org.apache.maven.plugins + maven-checkstyle-plugin + + true + + + + - - + + skipProcedureTests @@ -167,8 +167,9 @@ hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java index 7786f552da35..0854109aace8 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/AbstractProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; import java.util.concurrent.locks.ReentrantLock; - import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -69,11 +68,10 @@ public void signalAll() { } // ========================================================================== - // Add related + // Add related // ========================================================================== /** - * Add the procedure to the queue. - * NOTE: this method is called with the sched lock held. + * Add the procedure to the queue. NOTE: this method is called with the sched lock held. * @param procedure the Procedure to add * @param addFront true if the item should be added to the front of the queue */ @@ -131,11 +129,10 @@ protected void push(final Procedure procedure, final boolean addFront, final boo } // ========================================================================== - // Poll related + // Poll related // ========================================================================== /** - * Fetch one Procedure from the queue - * NOTE: this method is called with the sched lock held. + * Fetch one Procedure from the queue NOTE: this method is called with the sched lock held. * @return the Procedure to execute, or null if nothing is available. */ protected abstract Procedure dequeue(); @@ -187,18 +184,18 @@ public Procedure poll(final long nanos) { } // ========================================================================== - // Utils + // Utils // ========================================================================== /** - * Returns the number of elements in this queue. - * NOTE: this method is called with the sched lock held. + * Returns the number of elements in this queue. 
NOTE: this method is called with the sched lock + * held. * @return the number of elements in this queue. */ protected abstract int queueSize(); /** - * Returns true if there are procedures available to process. - * NOTE: this method is called with the sched lock held. + * Returns true if there are procedures available to process. NOTE: this method is called with the + * sched lock held. * @return true if there are procedures available to process, otherwise false. */ protected abstract boolean queueHasRunnables(); @@ -224,7 +221,7 @@ public boolean hasRunnables() { } // ============================================================================ - // TODO: Metrics + // TODO: Metrics // ============================================================================ public long getPollCalls() { return pollCalls; @@ -235,13 +232,13 @@ public long getNullPollCalls() { } // ========================================================================== - // Procedure Events + // Procedure Events // ========================================================================== /** - * Wake up all of the given events. - * Note that we first take scheduler lock and then wakeInternal() synchronizes on the event. - * Access should remain package-private. Use ProcedureEvent class to wake/suspend events. + * Wake up all of the given events. Note that we first take scheduler lock and then wakeInternal() + * synchronizes on the event. Access should remain package-private. Use ProcedureEvent class to + * wake/suspend events. * @param events the list of events to wake */ void wakeEvents(ProcedureEvent[] events) { @@ -276,7 +273,7 @@ protected void wakeProcedure(final Procedure procedure) { } // ========================================================================== - // Internal helpers + // Internal helpers // ========================================================================== protected void schedLock() { schedulerLock.lock(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java index 796a8e47c918..69f4fa52034d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureCleaner.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
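For readers new to AbstractProcedureScheduler, the push/poll discipline the reflowed comments describe reduces to a lock-plus-condition queue: producers enqueue under the scheduler lock and signal, consumers await with a timeout while nothing is runnable. A JDK-only sketch of that shape (the ArrayDeque backing, the names, and the single-condition signalling are simplifying assumptions; the real scheduler delegates enqueue/dequeue to its subclasses):

import java.util.ArrayDeque;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;

public class TimedQueueSketch<T> {
  private final ReentrantLock schedLock = new ReentrantLock();
  private final Condition schedWaitCond = schedLock.newCondition();
  private final ArrayDeque<T> queue = new ArrayDeque<>();

  public void push(T item, boolean addFront) {
    schedLock.lock();
    try {
      if (addFront) {
        queue.addFirst(item);
      } else {
        queue.addLast(item);
      }
      schedWaitCond.signal();
    } finally {
      schedLock.unlock();
    }
  }

  /** Returns an item, or null if nothing became available within the timeout. */
  public T poll(long timeout, TimeUnit unit) throws InterruptedException {
    schedLock.lock();
    try {
      long nanos = unit.toNanos(timeout);
      while (queue.isEmpty()) {
        if (nanos <= 0) {
          return null;
        }
        nanos = schedWaitCond.awaitNanos(nanos);
      }
      return queue.pollFirst();
    } finally {
      schedLock.unlock();
    }
  }
}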
See the NOTICE file * distributed with this work for additional information @@ -87,7 +87,7 @@ protected void periodicExecute(final TEnvironment env) { } final long evictTtl = - conf.getInt(ProcedureExecutor.EVICT_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_EVICT_TTL); + conf.getInt(ProcedureExecutor.EVICT_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_EVICT_TTL); final long evictAckTtl = conf.getInt(ProcedureExecutor.EVICT_ACKED_TTL_CONF_KEY, ProcedureExecutor.DEFAULT_ACKED_EVICT_TTL); final int batchSize = conf.getInt(BATCH_SIZE_CONF_KEY, DEFAULT_BATCH_SIZE); @@ -97,7 +97,7 @@ protected void periodicExecute(final TEnvironment env) { final long now = EnvironmentEdgeManager.currentTime(); final Iterator>> it = - completed.entrySet().iterator(); + completed.entrySet().iterator(); while (it.hasNext() && store.isRunning()) { final Map.Entry> entry = it.next(); final CompletedProcedureRetainer retainer = entry.getValue(); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java index d5f1ee7f6c3f..6ba261b8e01b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/CompletedProcedureRetainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,7 +49,7 @@ public void setClientAckTime(long clientAckTime) { } public boolean isExpired(long now, long evictTtl, long evictAckTtl) { - return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) || - (now - procedure.getLastUpdate()) >= evictTtl; + return (hasClientAckTime() && (now - getClientAckTime()) >= evictAckTtl) + || (now - procedure.getLastUpdate()) >= evictTtl; } } \ No newline at end of file diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java index 3fc975078604..a2e4e659e9f8 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/DelayedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java index 40eb22c3b56e..1bd4490da863 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
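The CompletedProcedureRetainer.isExpired() hunk above is only reformatted, but the retention rule it encodes is easy to restate: a completed procedure is evicted once the client has acked it and evictAckTtl has elapsed since the ack, or unconditionally once evictTtl has elapsed since its last update. A standalone restatement over plain millisecond timestamps (class and method names are illustrative):

public final class RetentionRule {
  private RetentionRule() {
  }

  static boolean isExpired(long now, long lastUpdate, Long clientAckTime, long evictTtl,
      long evictAckTtl) {
    boolean ackedLongAgo = clientAckTime != null && now - clientAckTime >= evictAckTtl;
    boolean idleLongAgo = now - lastUpdate >= evictTtl;
    return ackedLongAgo || idleLongAgo;
  }

  public static void main(String[] args) {
    // Acked 10 minutes ago with a 5 minute ack TTL: expired.
    System.out.println(isExpired(1_000_000L, 900_000L, 400_000L, 900_000L, 300_000L));
  }
}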
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java index dfe8e7d3c537..5561661d73b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/FailedRemoteDispatchException.java @@ -21,8 +21,7 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure - * operation. + * Used internally signaling failed queue of a remote procedure operation. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java index 32b4922a0b17..21350b56c23d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/InlineChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java index bfeb7398fa06..c07dcf2dfe44 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.util.function.Function; @@ -142,8 +141,8 @@ public boolean tryExclusiveLock(Procedure proc) { * @return whether we should wake the procedures waiting on the lock here. */ public boolean releaseExclusiveLock(Procedure proc) { - if (exclusiveLockOwnerProcedure == null || - exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()) { + if (exclusiveLockOwnerProcedure == null + || exclusiveLockOwnerProcedure.getProcId() != proc.getProcId()) { // We are not the lock owner, it is probably inherited from the parent procedures. return false; } @@ -187,7 +186,7 @@ public Stream filterWaitingQueue(Predicate predicate) { @Override public String toString() { - return "exclusiveLockOwner=" + (hasExclusiveLock() ? getExclusiveLockProcIdOwner() : "NONE") + - ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); + return "exclusiveLockOwner=" + (hasExclusiveLock() ? 
getExclusiveLockProcIdOwner() : "NONE") + + ", sharedLockCount=" + getSharedLockCount() + ", waitingProcCount=" + queue.size(); } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java index 33d2a38c80aa..d3723e1a35a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockStatus.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java index 8599af90d387..e1938ae9573e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockType.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java index 81d1e7212299..0b1ce49e526d 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResource.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -30,9 +29,9 @@ public class LockedResource { private final int sharedLockCount; private final List> waitingProcedures; - public LockedResource(LockedResourceType resourceType, String resourceName, - LockType lockType, Procedure exclusiveLockOwnerProcedure, - int sharedLockCount, List> waitingProcedures) { + public LockedResource(LockedResourceType resourceType, String resourceName, LockType lockType, + Procedure exclusiveLockOwnerProcedure, int sharedLockCount, + List> waitingProcedures) { this.resourceType = resourceType; this.resourceName = resourceName; this.lockType = lockType; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java index 55d195b3920f..d948b68c7d4f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/LockedResourceType.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
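The LockAndQueue hunk a little further up keeps its owner check on release: only the procedure whose id matches the exclusive owner may release the lock, and anyone else is treated as a no-op (typically a child that inherited the lock from its parent). Stripped of the procedure machinery, the idea looks like the sketch below; the re-entrant behaviour of tryExclusiveLock here is a simplifying assumption, and only the release-by-owner check mirrors the patched code.

public class ExclusiveOwnerSketch {
  private Long exclusiveOwnerId; // null means the lock is free

  public boolean tryExclusiveLock(long procId) {
    if (exclusiveOwnerId != null) {
      return exclusiveOwnerId == procId; // assumed re-entrant for the current owner
    }
    exclusiveOwnerId = procId;
    return true;
  }

  /** Returns true if the caller owned the lock, i.e. the waiters should be woken. */
  public boolean releaseExclusiveLock(long procId) {
    if (exclusiveOwnerId == null || exclusiveOwnerId != procId) {
      return false; // not the owner, probably inherited from a parent procedure
    }
    exclusiveOwnerId = null;
    return true;
  }
}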
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java index d2e13f135361..a6faf501682b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoNodeDispatchException.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key AFTER queuing dispatch. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key AFTER queuing dispatch. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java index 5cdbcd417dea..95265d00a7ba 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NoServerDispatchException.java @@ -20,9 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * In particular, no dispatch Node was found for the passed server name - * key. + * Used internally signaling failed queue of a remote procedure operation. In particular, no + * dispatch Node was found for the passed server name key. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java index 9deac23e1546..502d7ee0b6e1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/NullTargetServerDispatchException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,8 +20,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Used internally signaling failed queue of a remote procedure operation. - * The target server passed is null. + * Used internally signaling failed queue of a remote procedure operation. The target server passed + * is null. */ @SuppressWarnings("serial") @InterfaceAudience.Private diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java index 2d6e065da675..4f0bc6ce6b29 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/OnePhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java index c0bbfce583ad..ab1c9abd8b49 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/Procedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -113,9 +113,9 @@ public abstract class Procedure implements Comparable

    Bypassing a procedure is not like aborting. Aborting a procedure will trigger - * a rollback. And since the {@link #abort(Object)} method is overrideable - * Some procedures may have chosen to ignore the aborting. + * If bypass is set to true, when executing it will return null when {@link #doExecute(Object)} is + * called to finish the procedure and release any locks it may currently hold. The bypass does + * cleanup around the Procedure as far as the Procedure framework is concerned. It does not clean + * any internal state that the Procedure's themselves may have set. That is for the Procedures to + * do themselves when bypass is called. They should override bypass and do their cleanup in the + * overridden bypass method (be sure to call the parent bypass to ensure proper processing). + *
 + *
    + * Bypassing a procedure is not like aborting. Aborting a procedure will trigger a rollback. And + * since the {@link #abort(Object)} method is overrideable Some procedures may have chosen to + * ignore the aborting. */ private volatile boolean bypass = false; @@ -176,13 +176,13 @@ public boolean isBypass() { } /** - * Set the bypass to true. - * Only called in {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. - * DO NOT use this method alone, since we can't just bypass one single procedure. We need to - * bypass its ancestor too. If your Procedure has set state, it needs to undo it in here. - * @param env Current environment. May be null because of context; e.g. pretty-printing - * procedure WALs where there is no 'environment' (and where Procedures that require - * an 'environment' won't be run. + * Set the bypass to true. Only called in + * {@link ProcedureExecutor#bypassProcedure(long, long, boolean, boolean)} for now. DO NOT use + * this method alone, since we can't just bypass one single procedure. We need to bypass its + * ancestor too. If your Procedure has set state, it needs to undo it in here. + * @param env Current environment. May be null because of context; e.g. pretty-printing procedure + * WALs where there is no 'environment' (and where Procedures that require an + * 'environment' won't be run. */ protected void bypass(TEnvironment env) { this.bypass = true; @@ -201,64 +201,56 @@ protected final void skipPersistence() { } /** - * The main code of the procedure. It must be idempotent since execute() - * may be called multiple times in case of machine failure in the middle - * of the execution. + * The main code of the procedure. It must be idempotent since execute() may be called multiple + * times in case of machine failure in the middle of the execution. * @param env the environment passed to the ProcedureExecutor * @return a set of sub-procedures to run or ourselves if there is more work to do or null if the * procedure is done. * @throws ProcedureYieldException the procedure will be added back to the queue and retried - * later. + * later. * @throws InterruptedException the procedure will be added back to the queue and retried later. * @throws ProcedureSuspendedException Signal to the executor that Procedure has suspended itself - * and has set itself up waiting for an external event to wake it back up again. + * and has set itself up waiting for an external event to wake it back up again. */ protected abstract Procedure[] execute(TEnvironment env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException; /** - * The code to undo what was done by the execute() code. - * It is called when the procedure or one of the sub-procedures failed or an - * abort was requested. It should cleanup all the resources created by - * the execute() call. The implementation must be idempotent since rollback() - * may be called multiple time in case of machine failure in the middle - * of the execution. + * The code to undo what was done by the execute() code. It is called when the procedure or one of + * the sub-procedures failed or an abort was requested. It should cleanup all the resources + * created by the execute() call. The implementation must be idempotent since rollback() may be + * called multiple time in case of machine failure in the middle of the execution. 
* @param env the environment passed to the ProcedureExecutor * @throws IOException temporary failure, the rollback will retry later * @throws InterruptedException the procedure will be added back to the queue and retried later */ - protected abstract void rollback(TEnvironment env) - throws IOException, InterruptedException; + protected abstract void rollback(TEnvironment env) throws IOException, InterruptedException; /** - * The abort() call is asynchronous and each procedure must decide how to deal - * with it, if they want to be abortable. The simplest implementation - * is to have an AtomicBoolean set in the abort() method and then the execute() - * will check if the abort flag is set or not. - * abort() may be called multiple times from the client, so the implementation - * must be idempotent. - * - *

    NOTE: abort() is not like Thread.interrupt(). It is just a notification - * that allows the procedure implementor abort. + * The abort() call is asynchronous and each procedure must decide how to deal with it, if they + * want to be abortable. The simplest implementation is to have an AtomicBoolean set in the + * abort() method and then the execute() will check if the abort flag is set or not. abort() may + * be called multiple times from the client, so the implementation must be idempotent. + *

    + * NOTE: abort() is not like Thread.interrupt(). It is just a notification that allows the + * procedure implementor abort. */ protected abstract boolean abort(TEnvironment env); /** - * The user-level code of the procedure may have some state to - * persist (e.g. input arguments or current position in the processing state) to - * be able to resume on failure. + * The user-level code of the procedure may have some state to persist (e.g. input arguments or + * current position in the processing state) to be able to resume on failure. * @param serializer stores the serializable state */ protected abstract void serializeStateData(ProcedureStateSerializer serializer) - throws IOException; + throws IOException; /** - * Called on store load to allow the user to decode the previously serialized - * state. + * Called on store load to allow the user to decode the previously serialized state. * @param serializer contains the serialized state */ protected abstract void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException; + throws IOException; /** * The {@link #doAcquireLock(Object, ProcedureStore)} will be split into two steps, first, it will @@ -321,9 +313,9 @@ protected boolean holdLock(TEnvironment env) { /** * This is used in conjunction with {@link #holdLock(Object)}. If {@link #holdLock(Object)} - * returns true, the procedure executor will call acquireLock() once and thereafter - * not call {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls - * release/acquire around each invocation of {@link #execute(Object)}. + * returns true, the procedure executor will call acquireLock() once and thereafter not call + * {@link #releaseLock(Object)} until the Procedure is done (Normally, it calls release/acquire + * around each invocation of {@link #execute(Object)}. * @see #holdLock(Object) * @return true if the procedure has the lock, false otherwise. */ @@ -332,61 +324,57 @@ public final boolean hasLock() { } /** - * Called when the procedure is loaded for replay. - * The procedure implementor may use this method to perform some quick - * operation before replay. - * e.g. failing the procedure if the state on replay may be unknown. + * Called when the procedure is loaded for replay. The procedure implementor may use this method + * to perform some quick operation before replay. e.g. failing the procedure if the state on + * replay may be unknown. */ protected void beforeReplay(TEnvironment env) { // no-op } /** - * Called when the procedure is ready to be added to the queue after - * the loading/replay operation. + * Called when the procedure is ready to be added to the queue after the loading/replay operation. */ protected void afterReplay(TEnvironment env) { // no-op } /** - * Called when the procedure is marked as completed (success or rollback). - * The procedure implementor may use this method to cleanup in-memory states. - * This operation will not be retried on failure. If a procedure took a lock, - * it will have been released when this method runs. + * Called when the procedure is marked as completed (success or rollback). The procedure + * implementor may use this method to cleanup in-memory states. This operation will not be retried + * on failure. If a procedure took a lock, it will have been released when this method runs. */ protected void completionCleanup(TEnvironment env) { // no-op } /** - * By default, the procedure framework/executor will try to run procedures start to finish. 
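The reflowed contract above — an idempotent execute() that returns sub-procedures or null when done, an idempotent rollback(), an abort() that merely flips a flag which execute() polls, and the two hooks that persist and restore resumable state — can be summed up in a skeleton subclass. This is a sketch only: the Void environment type, the class name, and the empty serialization bodies are assumptions, not code from the patch.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureStateSerializer;
import org.apache.hadoop.hbase.procedure2.ProcedureSuspendedException;
import org.apache.hadoop.hbase.procedure2.ProcedureYieldException;

public class NoopProcedure extends Procedure<Void> {
  private final AtomicBoolean aborted = new AtomicBoolean(false);

  @Override
  protected Procedure<Void>[] execute(Void env)
      throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException {
    if (aborted.get()) {
      // A real procedure would record a failure here before finishing.
      return null;
    }
    // Idempotent work goes here; returning null signals that the procedure is done.
    return null;
  }

  @Override
  protected void rollback(Void env) throws IOException, InterruptedException {
    // Undo whatever execute() created; this must also be idempotent.
  }

  @Override
  protected boolean abort(Void env) {
    // Just a notification, as the javadoc says: set the flag and let execute() react.
    return aborted.compareAndSet(false, true);
  }

  @Override
  protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // No state to persist in this sketch.
  }

  @Override
  protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException {
    // No state to restore in this sketch.
  }
}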
- * Return true to make the executor yield between each execution step to - * give other procedures a chance to run. + * By default, the procedure framework/executor will try to run procedures start to finish. Return + * true to make the executor yield between each execution step to give other procedures a chance + * to run. * @param env the environment passed to the ProcedureExecutor - * @return Return true if the executor should yield on completion of an execution step. - * Defaults to return false. + * @return Return true if the executor should yield on completion of an execution step. Defaults + * to return false. */ protected boolean isYieldAfterExecutionStep(TEnvironment env) { return false; } /** - * By default, the executor will keep the procedure result around util - * the eviction TTL is expired. The client can cut down the waiting time - * by requesting that the result is removed from the executor. - * In case of system started procedure, we can force the executor to auto-ack. + * By default, the executor will keep the procedure result around util the eviction TTL is + * expired. The client can cut down the waiting time by requesting that the result is removed from + * the executor. In case of system started procedure, we can force the executor to auto-ack. * @param env the environment passed to the ProcedureExecutor - * @return true if the executor should wait the client ack for the result. - * Defaults to return true. + * @return true if the executor should wait the client ack for the result. Defaults to return + * true. */ protected boolean shouldWaitClientAck(TEnvironment env) { return true; } /** - * Override this method to provide procedure specific counters for submitted count, failed - * count and time histogram. + * Override this method to provide procedure specific counters for submitted count, failed count + * and time histogram. * @param env The environment passed to the procedure executor * @return Container object for procedure related metric */ @@ -467,13 +455,9 @@ protected StringBuilder toStringSimpleSB() { } /* - * TODO - * Enable later when this is being used. - * Currently owner not used. - if (hasOwner()) { - sb.append(", owner="); - sb.append(getOwner()); - }*/ + * TODO Enable later when this is being used. Currently owner not used. if (hasOwner()) { + * sb.append(", owner="); sb.append(getOwner()); } + */ sb.append(", state="); // pState for Procedure State as opposed to any other kind. toStringState(sb); @@ -535,8 +519,7 @@ protected void toStringState(StringBuilder builder) { } /** - * Extend the toString() information with the procedure details - * e.g. className and parameters + * Extend the toString() information with the procedure details e.g. className and parameters * @param builder the string builder to use to append the proc specific information */ protected void toStringClassDetails(StringBuilder builder) { @@ -544,11 +527,11 @@ protected void toStringClassDetails(StringBuilder builder) { } // ========================================================================== - // Those fields are unchanged after initialization. + // Those fields are unchanged after initialization. // - // Each procedure will get created from the user or during - // ProcedureExecutor.start() during the load() phase and then submitted - // to the executor. these fields will never be changed after initialization + // Each procedure will get created from the user or during + // ProcedureExecutor.start() during the load() phase and then submitted + // to the executor. 
these fields will never be changed after initialization // ========================================================================== public long getProcId() { return procId; @@ -623,15 +606,14 @@ public void setOwner(User owner) { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected void setSubmittedTime(long submittedTime) { this.submittedTime = submittedTime; } // ========================================================================== - // runtime state - timeout related + // runtime state - timeout related // ========================================================================== /** * @param timeout timeout interval in msec @@ -652,8 +634,7 @@ public int getTimeout() { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected void setLastUpdate(long lastUpdate) { this.lastUpdate = lastUpdate; @@ -671,9 +652,8 @@ public long getLastUpdate() { } /** - * Timeout of the next timeout. - * Called by the ProcedureExecutor if the procedure has timeout set and - * the procedure is in the waiting queue. + * Timeout of the next timeout. Called by the ProcedureExecutor if the procedure has timeout set + * and the procedure is in the waiting queue. * @return the timestamp of the next timeout. */ protected long getTimeoutTimestamp() { @@ -681,7 +661,7 @@ protected long getTimeoutTimestamp() { } // ========================================================================== - // runtime state + // runtime state // ========================================================================== /** * @return the time elapsed between the last update and the start time of the procedure. @@ -707,8 +687,8 @@ protected void setResult(byte[] result) { /** * Will only be called when loading procedures from procedure store, where we need to record - * whether the procedure has already held a lock. Later we will call - * {@link #restoreLock(Object)} to actually acquire the lock. + * whether the procedure has already held a lock. Later we will call {@link #restoreLock(Object)} + * to actually acquire the lock. */ final void lockedWhenLoading() { this.lockedWhenLoading = true; @@ -727,12 +707,12 @@ public boolean isLockedWhenLoading() { } // ============================================================================================== - // Runtime state, updated every operation by the ProcedureExecutor + // Runtime state, updated every operation by the ProcedureExecutor // - // There is always 1 thread at the time operating on the state of the procedure. - // The ProcedureExecutor may check and set states, or some Procecedure may - // update its own state. but no concurrent updates. we use synchronized here - // just because the procedure can get scheduled on different executor threads on each step. + // There is always 1 thread at the time operating on the state of the procedure. + // The ProcedureExecutor may check and set states, or some Procecedure may + // update its own state. but no concurrent updates. we use synchronized here + // just because the procedure can get scheduled on different executor threads on each step. 
// ============================================================================================== /** @@ -842,8 +822,7 @@ public synchronized RemoteProcedureException getException() { protected synchronized void setChildrenLatch(int numChildren) { this.childrenLatch = numChildren; if (LOG.isTraceEnabled()) { - LOG.trace("CHILD LATCH INCREMENT SET " + - this.childrenLatch, new Throwable(this.toString())); + LOG.trace("CHILD LATCH INCREMENT SET " + this.childrenLatch, new Throwable(this.toString())); } } @@ -862,7 +841,7 @@ protected synchronized void incChildrenLatch() { * Called by the ProcedureExecutor to notify that one of the sub-procedures has completed. */ private synchronized boolean childrenCountDown() { - assert childrenLatch > 0: this; + assert childrenLatch > 0 : this; boolean b = --childrenLatch == 0; if (LOG.isTraceEnabled()) { LOG.trace("CHILD LATCH DECREMENT " + childrenLatch, new Throwable(this.toString())); @@ -871,8 +850,7 @@ private synchronized boolean childrenCountDown() { } /** - * Try to set this procedure into RUNNABLE state. - * Succeeds if all subprocedures/children are done. + * Try to set this procedure into RUNNABLE state. Succeeds if all subprocedures/children are done. * @return True if we were able to move procedure to RUNNABLE state. */ synchronized boolean tryRunnable() { @@ -894,8 +872,8 @@ protected synchronized int getChildrenLatch() { } /** - * Called by the RootProcedureState on procedure execution. - * Each procedure store its stack-index positions. + * Called by the RootProcedureState on procedure execution. Each procedure store its stack-index + * positions. */ protected synchronized void addStackIndex(final int index) { if (stackIndexes == null) { @@ -918,8 +896,7 @@ protected synchronized boolean removeStackIndex() { } /** - * Called on store load to initialize the Procedure internals after - * the creation/deserialization. + * Called on store load to initialize the Procedure internals after the creation/deserialization. */ protected synchronized void setStackIndexes(final List stackIndexes) { this.stackIndexes = new int[stackIndexes.size()]; @@ -937,7 +914,7 @@ protected synchronized int[] getStackIndexes() { } // ========================================================================== - // Internal methods - called by the ProcedureExecutor + // Internal methods - called by the ProcedureExecutor // ========================================================================== /** @@ -962,8 +939,7 @@ protected Procedure[] doExecute(TEnvironment env) /** * Internal method called by the ProcedureExecutor that starts the user-level code rollback(). 
*/ - protected void doRollback(TEnvironment env) - throws IOException, InterruptedException { + protected void doRollback(TEnvironment env) throws IOException, InterruptedException { try { updateTimestamp(); if (bypass) { @@ -1055,7 +1031,7 @@ public int compareTo(final Procedure other) { } // ========================================================================== - // misc utils + // misc utils // ========================================================================== /** diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java index 1b6b93db70c2..9d6f9a4965c0 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureAbortedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java index c5f02e950bc5..ad42634edb95 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureDeque.java @@ -1,5 +1,4 @@ -/** - +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -12,20 +11,18 @@ * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUTKey WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.hbase.procedure2; import java.util.ArrayDeque; - import org.apache.yetus.audience.InterfaceAudience; /** - * Type class. - * For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque gives - * more understanding that it's a queue of waiting procedures. + * Type class. For conceptual purpose only. Seeing ProcedureDeque as type instead of just ArrayDeque + * gives more understanding that it's a queue of waiting procedures. */ @InterfaceAudience.Private public class ProcedureDeque extends ArrayDeque { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java index 500345159735..16f82e796806 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureEvent.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; @@ -61,12 +60,12 @@ public synchronized void suspend() { } /** - * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the - * event as ready. - * See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not synchronized. + * Wakes up the suspended procedures by pushing them back into scheduler queues and sets the event + * as ready. See {@link #wakeInternal(AbstractProcedureScheduler)} for why this is not + * synchronized. */ public void wake(AbstractProcedureScheduler procedureScheduler) { - procedureScheduler.wakeEvents(new ProcedureEvent[]{this}); + procedureScheduler.wakeEvents(new ProcedureEvent[] { this }); } /** @@ -89,22 +88,19 @@ public synchronized boolean wakeIfSuspended(AbstractProcedureScheduler procedure * Wakes up all the given events and puts the procedures waiting on them back into * ProcedureScheduler queues. */ - public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent ... events) { + public static void wakeEvents(AbstractProcedureScheduler scheduler, ProcedureEvent... events) { scheduler.wakeEvents(events); } /** - * Only to be used by ProcedureScheduler implementations. - * Reason: To wake up multiple events, locking sequence is - * schedLock --> synchronized (event) - * To wake up an event, both schedLock() and synchronized(event) are required. - * The order is schedLock() --> synchronized(event) because when waking up multiple events - * simultaneously, we keep the scheduler locked until all procedures suspended on these events - * have been added back to the queue (Maybe it's not required? Evaluate!) - * To avoid deadlocks, we want to keep the locking order same even when waking up single event. - * That's why, {@link #wake(AbstractProcedureScheduler)} above uses the same code path as used - * when waking up multiple events. - * Access should remain package-private. + * Only to be used by ProcedureScheduler implementations. Reason: To wake up multiple events, + * locking sequence is schedLock --> synchronized (event) To wake up an event, both schedLock() + * and synchronized(event) are required. The order is schedLock() --> synchronized(event) because + * when waking up multiple events simultaneously, we keep the scheduler locked until all + * procedures suspended on these events have been added back to the queue (Maybe it's not + * required? Evaluate!) To avoid deadlocks, we want to keep the locking order same even when + * waking up single event. That's why, {@link #wake(AbstractProcedureScheduler)} above uses the + * same code path as used when waking up multiple events. Access should remain package-private. */ synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) { if (ready && !suspendedProcedures.isEmpty()) { @@ -122,8 +118,8 @@ synchronized void wakeInternal(AbstractProcedureScheduler procedureScheduler) { } /** - * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it - * here for tests. + * Access to suspendedProcedures is 'synchronized' on this object, but it's fine to return it here + * for tests. 
*/ public ProcedureDeque getSuspendedProcedures() { return suspendedProcedures; @@ -131,7 +127,7 @@ public ProcedureDeque getSuspendedProcedures() { @Override public String toString() { - return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + - ", " + suspendedProcedures; + return getClass().getSimpleName() + " for " + object + ", ready=" + isReady() + ", " + + suspendedProcedures; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java index 93cd355c4e0d..b52510286d96 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java index b1dc1a420c8f..9ff1a38e0392 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -62,17 +62,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Thread Pool that executes the submitted procedures. - * The executor has a ProcedureStore associated. - * Each operation is logged and on restart the pending procedures are resumed. - * - * Unless the Procedure code throws an error (e.g. invalid user input) - * the procedure will complete (at some point in time), On restart the pending - * procedures are resumed and the once failed will be rolledback. - * - * The user can add procedures to the executor via submitProcedure(proc) - * check for the finished state via isFinished(procId) - * and get the result via getResult(procId) + * Thread Pool that executes the submitted procedures. The executor has a ProcedureStore associated. + * Each operation is logged and on restart the pending procedures are resumed. Unless the Procedure + * code throws an error (e.g. invalid user input) the procedure will complete (at some point in + * time), On restart the pending procedures are resumed and the once failed will be rolledback. The + * user can add procedures to the executor via submitProcedure(proc) check for the finished state + * via isFinished(procId) and get the result via getResult(procId) */ @InterfaceAudience.Private public class ProcedureExecutor { @@ -88,13 +83,13 @@ public class ProcedureExecutor { public static final String EVICT_TTL_CONF_KEY = "hbase.procedure.cleaner.evict.ttl"; static final int DEFAULT_EVICT_TTL = 15 * 60000; // 15min - public static final String EVICT_ACKED_TTL_CONF_KEY ="hbase.procedure.cleaner.acked.evict.ttl"; + public static final String EVICT_ACKED_TTL_CONF_KEY = "hbase.procedure.cleaner.acked.evict.ttl"; static final int DEFAULT_ACKED_EVICT_TTL = 5 * 60000; // 5min /** - * {@link #testing} is non-null when ProcedureExecutor is being tested. 
Tests will try to - * break PE having it fail at various junctures. When non-null, testing is set to an instance of - * the below internal {@link Testing} class with flags set for the particular test. + * {@link #testing} is non-null when ProcedureExecutor is being tested. Tests will try to break PE + * having it fail at various junctures. When non-null, testing is set to an instance of the below + * internal {@link Testing} class with flags set for the particular test. */ volatile Testing testing = null; @@ -114,8 +109,8 @@ public static class Testing { /** * Set when we want to fail AFTER state has been stored into the WAL. Rarely used. HBASE-20978 - * is about a case where memory-state was being set after store to WAL where a crash could - * cause us to get stuck. This flag allows killing at what was a vulnerable time. + * is about a case where memory-state was being set after store to WAL where a crash could cause + * us to get stuck. This flag allows killing at what was a vulnerable time. */ protected volatile boolean killAfterStoreUpdate = false; protected volatile boolean toggleKillAfterStoreUpdate = false; @@ -155,32 +150,34 @@ protected boolean shouldKillAfterStoreUpdate(final boolean isSuspended) { public interface ProcedureExecutorListener { void procedureLoaded(long procId); + void procedureAdded(long procId); + void procedureFinished(long procId); } /** - * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. - * Once a Root-Procedure completes (success or failure), the result will be added to this map. - * The user of ProcedureExecutor should call getResult(procId) to get the result. + * Map the the procId returned by submitProcedure(), the Root-ProcID, to the Procedure. Once a + * Root-Procedure completes (success or failure), the result will be added to this map. The user + * of ProcedureExecutor should call getResult(procId) to get the result. */ private final ConcurrentHashMap> completed = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** * Map the the procId returned by submitProcedure(), the Root-ProcID, to the RootProcedureState. - * The RootProcedureState contains the execution stack of the Root-Procedure, - * It is added to the map by submitProcedure() and removed on procedure completion. + * The RootProcedureState contains the execution stack of the Root-Procedure, It is added to the + * map by submitProcedure() and removed on procedure completion. */ private final ConcurrentHashMap> rollbackStack = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** - * Helper map to lookup the live procedures by ID. - * This map contains every procedure. root-procedures and subprocedures. + * Helper map to lookup the live procedures by ID. This map contains every procedure. + * root-procedures and subprocedures. */ private final ConcurrentHashMap> procedures = - new ConcurrentHashMap<>(); + new ConcurrentHashMap<>(); /** * Helper map to lookup whether the procedure already issued from the same client. This map @@ -189,40 +186,37 @@ public interface ProcedureExecutorListener { private final ConcurrentHashMap nonceKeysToProcIdsMap = new ConcurrentHashMap<>(); private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList<>(); + new CopyOnWriteArrayList<>(); private Configuration conf; /** * Created in the {@link #init(int, boolean)} method. Destroyed in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). 
- * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private ThreadGroup threadGroup; /** - * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private CopyOnWriteArrayList workerThreads; /** * Created in the {@link #init(int, boolean)} method. Terminated in {@link #join()} (FIX! Doing - * resource handling rather than observing in a #join is unexpected). - * Overridden when we do the ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery - * (Should be ok). + * resource handling rather than observing in a #join is unexpected). Overridden when we do the + * ProcedureTestingUtility.testRecoveryAndDoubleExecution trickery (Should be ok). */ private TimeoutExecutorThread timeoutExecutor; /** * WorkerMonitor check for stuck workers and new worker thread when necessary, for example if * there is no worker to assign meta, it will new worker thread for it, so it is very important. - * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore - * and so on, some tasks may execute for a long time so will block other tasks like - * WorkerMonitor, so use a dedicated thread for executing WorkerMonitor. + * TimeoutExecutor execute many tasks like DeadServerMetricRegionChore RegionInTransitionChore and + * so on, some tasks may execute for a long time so will block other tasks like WorkerMonitor, so + * use a dedicated thread for executing WorkerMonitor. 
*/ private TimeoutExecutorThread workerMonitorExecutor; @@ -272,8 +266,9 @@ private void forceUpdateProcedure(long procId) throws IOException { Procedure proc = procedures.get(procId); if (proc != null) { if (proc.isFinished() && proc.hasParent() && isRootFinished(proc)) { - LOG.debug("Procedure {} has already been finished and parent is succeeded," + - " skip force updating", proc); + LOG.debug("Procedure {} has already been finished and parent is succeeded," + + " skip force updating", + proc); return; } } else { @@ -566,7 +561,7 @@ public void init(int numThreads, boolean abortOnCorruption) throws IOException { this.corePoolSize = numThreads; this.maxPoolSize = 10 * numThreads; LOG.info("Starting {} core workers (bigger of cpus/4 or 16) with max (burst) worker count={}", - corePoolSize, maxPoolSize); + corePoolSize, maxPoolSize); this.threadGroup = new ThreadGroup("PEWorkerGroup"); this.timeoutExecutor = new TimeoutExecutorThread<>(this, threadGroup, "ProcExecTimeout"); @@ -615,7 +610,7 @@ public void startWorkers() throws IOException { LOG.trace("Start workers {}", workerThreads.size()); timeoutExecutor.start(); workerMonitorExecutor.start(); - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.start(); } @@ -624,7 +619,7 @@ public void startWorkers() throws IOException { // Add completed cleaner chore addChore(new CompletedProcedureCleaner<>(conf, store, procExecutionLock, completed, - nonceKeysToProcIdsMap)); + nonceKeysToProcIdsMap)); } public void stop() { @@ -647,7 +642,7 @@ public void join() { workerMonitorExecutor.awaitTermination(); // stop the worker threads - for (WorkerThread worker: workerThreads) { + for (WorkerThread worker : workerThreads) { worker.awaitTermination(); } @@ -656,8 +651,8 @@ public void join() { try { threadGroup.destroy(); } catch (IllegalThreadStateException e) { - LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", - this.threadGroup, e.getMessage()); + LOG.error("ThreadGroup {} contains running threads; {}: See STDOUT", this.threadGroup, + e.getMessage()); // This dumps list of threads on STDOUT. this.threadGroup.list(); } @@ -673,12 +668,12 @@ public void join() { public void refreshConfiguration(final Configuration conf) { this.conf = conf; - setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, - DEFAULT_WORKER_KEEP_ALIVE_TIME), TimeUnit.MILLISECONDS); + setKeepAliveTime(conf.getLong(WORKER_KEEP_ALIVE_TIME_CONF_KEY, DEFAULT_WORKER_KEEP_ALIVE_TIME), + TimeUnit.MILLISECONDS); } // ========================================================================== - // Accessors + // Accessors // ========================================================================== public boolean isRunning() { return running.get(); @@ -724,7 +719,7 @@ public long getKeepAliveTime(final TimeUnit timeUnit) { } // ========================================================================== - // Submit/Remove Chores + // Submit/Remove Chores // ========================================================================== /** @@ -753,7 +748,7 @@ public boolean removeChore(@Nullable ProcedureInMemoryChore chore) } // ========================================================================== - // Nonce Procedure helpers + // Nonce Procedure helpers // ========================================================================== /** * Create a NonceKey from the specified nonceGroup and nonce. 
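The hunks around this point touch the ProcedureExecutor nonce helpers (createNonceKey, registerNonce, submitProcedure(proc, nonceKey)), which together give callers idempotent submission. As a minimal reading aid only, and not part of the patch, the pattern described by the registerNonce javadoc might look like the sketch below; the SketchNonceSubmit class, the TEnv type parameter and the submitIdempotently method name are placeholders invented for illustration, and an already-started ProcedureExecutor is assumed.

import org.apache.hadoop.hbase.procedure2.Procedure;
import org.apache.hadoop.hbase.procedure2.ProcedureExecutor;
import org.apache.hadoop.hbase.util.NonceKey;

// Illustrative sketch only -- not part of this patch.
final class SketchNonceSubmit {
  /**
   * Submit proc at most once for a given (nonceGroup, nonce) pair, reusing the procId that an
   * earlier attempt with the same nonce already reserved.
   */
  static <TEnv> long submitIdempotently(ProcedureExecutor<TEnv> executor, Procedure<TEnv> proc,
      long nonceGroup, long nonce) {
    NonceKey nonceKey = executor.createNonceKey(nonceGroup, nonce);
    long procId = executor.registerNonce(nonceKey);
    if (procId >= 0) {
      // A valid (non-negative) id means someone already reserved this nonce; reuse their procId.
      return procId;
    }
    // An invalid id was returned: the nonce is ours, so proceed and submit under the reserved key.
    return executor.submitProcedure(proc, nonceKey);
  }
}

If the caller fails before reaching submitProcedure(), the hunks that follow show the cleanup hooks a real caller would pair with this pattern: unregisterNonceIfProcedureWasNotSubmitted(nonceKey) and setFailureResultForNonce(...).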
@@ -766,13 +761,10 @@ public NonceKey createNonceKey(final long nonceGroup, final long nonce) { } /** - * Register a nonce for a procedure that is going to be submitted. - * A procId will be reserved and on submitProcedure(), - * the procedure with the specified nonce will take the reserved ProcId. - * If someone already reserved the nonce, this method will return the procId reserved, - * otherwise an invalid procId will be returned. and the caller should procede - * and submit the procedure. - * + * Register a nonce for a procedure that is going to be submitted. A procId will be reserved and + * on submitProcedure(), the procedure with the specified nonce will take the reserved ProcId. If + * someone already reserved the nonce, this method will return the procId reserved, otherwise an + * invalid procId will be returned. and the caller should procede and submit the procedure. * @param nonceKey A unique identifier for this operation from the client or process. * @return the procId associated with the nonce, if any otherwise an invalid procId. */ @@ -796,9 +788,8 @@ public long registerNonce(final NonceKey nonceKey) { // we found a registered nonce, but the procedure may not have been submitted yet. // since the client expect the procedure to be submitted, spin here until it is. final boolean traceEnabled = LOG.isTraceEnabled(); - while (isRunning() && - !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) && - nonceKeysToProcIdsMap.containsKey(nonceKey)) { + while (isRunning() && !(procedures.containsKey(oldProcId) || completed.containsKey(oldProcId)) + && nonceKeysToProcIdsMap.containsKey(nonceKey)) { if (traceEnabled) { LOG.trace("Waiting for pid=" + oldProcId.longValue() + " to be submitted"); } @@ -828,9 +819,8 @@ public void unregisterNonceIfProcedureWasNotSubmitted(final NonceKey nonceKey) { } /** - * If the failure failed before submitting it, we may want to give back the - * same error to the requests with the same nonceKey. - * + * If the failure failed before submitting it, we may want to give back the same error to the + * requests with the same nonceKey. * @param nonceKey A unique identifier for this operation from the client or process * @param procName name of the procedure, used to inform the user * @param procOwner name of the owner of the procedure, used to inform the user @@ -848,13 +838,13 @@ public void setFailureResultForNonce(NonceKey nonceKey, String procName, User pr } Procedure proc = - new FailedProcedure<>(procId.longValue(), procName, procOwner, nonceKey, exception); + new FailedProcedure<>(procId.longValue(), procName, procOwner, nonceKey, exception); completed.putIfAbsent(procId, new CompletedProcedureRetainer<>(proc)); } // ========================================================================== - // Submit/Abort Procedure + // Submit/Abort Procedure // ========================================================================== /** * Add a new root-procedure to the executor. @@ -866,45 +856,39 @@ public long submitProcedure(Procedure proc) { } /** - * Bypass a procedure. If the procedure is set to bypass, all the logic in - * execute/rollback will be ignored and it will return success, whatever. - * It is used to recover buggy stuck procedures, releasing the lock resources - * and letting other procedures run. Bypassing one procedure (and its ancestors will - * be bypassed automatically) may leave the cluster in a middle state, e.g. region - * not assigned, or some hdfs files left behind. 
After getting rid of those stuck procedures, - * the operators may have to do some clean up on hdfs or schedule some assign procedures - * to let region online. DO AT YOUR OWN RISK. + * Bypass a procedure. If the procedure is set to bypass, all the logic in execute/rollback will + * be ignored and it will return success, whatever. It is used to recover buggy stuck procedures, + * releasing the lock resources and letting other procedures run. Bypassing one procedure (and its + * ancestors will be bypassed automatically) may leave the cluster in a middle state, e.g. region + * not assigned, or some hdfs files left behind. After getting rid of those stuck procedures, the + * operators may have to do some clean up on hdfs or schedule some assign procedures to let region + * online. DO AT YOUR OWN RISK. *
<p>
    - * A procedure can be bypassed only if - * 1. The procedure is in state of RUNNABLE, WAITING, WAITING_TIMEOUT - * or it is a root procedure without any child. - * 2. No other worker thread is executing it - * 3. No child procedure has been submitted - * + * A procedure can be bypassed only if 1. The procedure is in state of RUNNABLE, WAITING, + * WAITING_TIMEOUT or it is a root procedure without any child. 2. No other worker thread is + * executing it 3. No child procedure has been submitted *
<p>
    - * If all the requirements are meet, the procedure and its ancestors will be - * bypassed and persisted to WAL. - * + * If all the requirements are meet, the procedure and its ancestors will be bypassed and + * persisted to WAL. *
<p>
    - * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. - * TODO: What about WAITING_TIMEOUT? + * If the procedure is in WAITING state, will set it to RUNNABLE add it to run queue. TODO: What + * about WAITING_TIMEOUT? * @param pids the procedure id * @param lockWait time to wait lock - * @param force if force set to true, we will bypass the procedure even if it is executing. - * This is for procedures which can't break out during executing(due to bug, mostly) - * In this case, bypassing the procedure is not enough, since it is already stuck - * there. We need to restart the master after bypassing, and letting the problematic - * procedure to execute wth bypass=true, so in that condition, the procedure can be - * successfully bypassed. + * @param force if force set to true, we will bypass the procedure even if it is executing. This + * is for procedures which can't break out during executing(due to bug, mostly) In this + * case, bypassing the procedure is not enough, since it is already stuck there. We need + * to restart the master after bypassing, and letting the problematic procedure to + * execute wth bypass=true, so in that condition, the procedure can be successfully + * bypassed. * @param recursive We will do an expensive search for children of each pid. EXPENSIVE! * @return true if bypass success * @throws IOException IOException */ public List bypassProcedure(List pids, long lockWait, boolean force, - boolean recursive) - throws IOException { + boolean recursive) throws IOException { List result = new ArrayList(pids.size()); - for(long pid: pids) { + for (long pid : pids) { result.add(bypassProcedure(pid, lockWait, force, recursive)); } return result; @@ -919,16 +903,16 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur return false; } - LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", - procedure, lockWait, override, recursive); + LOG.debug("Begin bypass {} with lockWait={}, override={}, recursive={}", procedure, lockWait, + override, recursive); IdLock.Entry lockEntry = procExecutionLock.tryLockEntry(procedure.getProcId(), lockWait); if (lockEntry == null && !override) { - LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, skipping bypass with force={}", lockWait, + procedure, override); return false; } else if (lockEntry == null) { - LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", - lockWait, procedure, override); + LOG.debug("Waited {} ms, but {} is still running, begin bypass with force={}", lockWait, + procedure, override); } try { // check whether the procedure is already finished @@ -942,9 +926,9 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur // EXPENSIVE. Checks each live procedure of which there could be many!!! // Is there another way to get children of a procedure? LOG.info("Recursive bypass on children of pid={}", procedure.getProcId()); - this.procedures.forEachValue(1 /*Single-threaded*/, + this.procedures.forEachValue(1 /* Single-threaded */, // Transformer - v -> v.getParentProcId() == procedure.getProcId()? v: null, + v -> v.getParentProcId() == procedure.getProcId() ? 
v : null, // Consumer v -> { try { @@ -964,8 +948,8 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur && procedure.getState() != ProcedureState.WAITING && procedure.getState() != ProcedureState.WAITING_TIMEOUT) { LOG.debug("Bypassing procedures in RUNNABLE, WAITING and WAITING_TIMEOUT states " - + "(with no parent), {}", - procedure); + + "(with no parent), {}", + procedure); // Question: how is the bypass done here? return false; } @@ -982,7 +966,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur current = getProcedure(parentID); } - //wake up waiting procedure, already checked there is no child + // wake up waiting procedure, already checked there is no child if (procedure.getState() == ProcedureState.WAITING) { procedure.setState(ProcedureState.RUNNABLE); store.update(procedure); @@ -1023,7 +1007,7 @@ boolean bypassProcedure(long pid, long lockWait, boolean override, boolean recur * @param nonceKey the registered unique identifier for this operation from the client or process. * @return the procedure id, that can be used to monitor the operation */ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH", + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_NULL_ON_SOME_PATH", justification = "FindBugs is blind to the check-for-null") public long submitProcedure(Procedure proc, NonceKey nonceKey) { Preconditions.checkArgument(lastProcId.get() >= 0); @@ -1107,8 +1091,8 @@ private long pushProcedure(Procedure proc) { } /** - * Send an abort notification the specified procedure. - * Depending on the procedure implementation the abort can be considered or ignored. + * Send an abort notification the specified procedure. Depending on the procedure implementation + * the abort can be considered or ignored. * @param procId the procedure to abort * @return true if the procedure exists and has received the abort, otherwise false. */ @@ -1117,8 +1101,8 @@ public boolean abort(long procId) { } /** - * Send an abort notification to the specified procedure. - * Depending on the procedure implementation, the abort can be considered or ignored. + * Send an abort notification to the specified procedure. Depending on the procedure + * implementation, the abort can be considered or ignored. * @param procId the procedure to abort * @param mayInterruptIfRunning if the proc completed at least one step, should it be aborted? * @return true if the procedure exists and has received the abort, otherwise false. @@ -1135,7 +1119,7 @@ public boolean abort(long procId, boolean mayInterruptIfRunning) { } // ========================================================================== - // Executor query helpers + // Executor query helpers // ========================================================================== public Procedure getProcedure(final long procId) { return procedures.get(procId); @@ -1159,9 +1143,8 @@ public Procedure getResult(long procId) { } /** - * Return true if the procedure is finished. - * The state may be "completed successfully" or "failed and rolledback". - * Use getResult() to check the state or get the result data. + * Return true if the procedure is finished. The state may be "completed successfully" or "failed + * and rolledback". Use getResult() to check the state or get the result data. * @param procId the ID of the procedure to check * @return true if the procedure execution is finished, otherwise false. 
*/ @@ -1211,8 +1194,8 @@ public Procedure getResultOrProcedure(long procId) { * Check if the user is this procedure's owner * @param procId the target procedure * @param user the user - * @return true if the user is the owner of the procedure, - * false otherwise or the owner is unknown. + * @return true if the user is the owner of the procedure, false otherwise or the owner is + * unknown. */ public boolean isProcedureOwner(long procId, User user) { if (user == null) { @@ -1250,19 +1233,19 @@ public Collection> getActiveProceduresNoCopy() { */ public List> getProcedures() { List> procedureList = - new ArrayList<>(procedures.size() + completed.size()); + new ArrayList<>(procedures.size() + completed.size()); procedureList.addAll(procedures.values()); // Note: The procedure could show up twice in the list with different state, as // it could complete after we walk through procedures list and insert into // procedureList - it is ok, as we will use the information in the Procedure // to figure it out; to prevent this would increase the complexity of the logic. completed.values().stream().map(CompletedProcedureRetainer::getProcedure) - .forEach(procedureList::add); + .forEach(procedureList::add); return procedureList; } // ========================================================================== - // Listeners helpers + // Listeners helpers // ========================================================================== public void registerListener(ProcedureExecutorListener listener) { this.listeners.add(listener); @@ -1274,7 +1257,7 @@ public boolean unregisterListener(ProcedureExecutorListener listener) { private void sendProcedureLoadedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureLoaded(procId); } catch (Throwable e) { @@ -1286,7 +1269,7 @@ private void sendProcedureLoadedNotification(final long procId) { private void sendProcedureAddedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureAdded(procId); } catch (Throwable e) { @@ -1298,7 +1281,7 @@ private void sendProcedureAddedNotification(final long procId) { private void sendProcedureFinishedNotification(final long procId) { if (!this.listeners.isEmpty()) { - for (ProcedureExecutorListener listener: this.listeners) { + for (ProcedureExecutorListener listener : this.listeners) { try { listener.procedureFinished(procId); } catch (Throwable e) { @@ -1309,7 +1292,7 @@ private void sendProcedureFinishedNotification(final long procId) { } // ========================================================================== - // Procedure IDs helpers + // Procedure IDs helpers // ========================================================================== private long nextProcId() { long procId = lastProcId.incrementAndGet(); @@ -1341,7 +1324,7 @@ Long getRootProcedureId(Procedure proc) { } // ========================================================================== - // Executions + // Executions // ========================================================================== private void executeProcedure(Procedure proc) { if (proc.isFinished()) { @@ -1577,9 +1560,8 @@ private void cleanupAfterRollbackOneStep(Procedure proc) { } /** - * Execute the rollback of the procedure step. 
- * It updates the store with the new state (stack index) - * or will remove completly the procedure in case it is a child. + * Execute the rollback of the procedure step. It updates the store with the new state (stack + * index) or will remove completly the procedure in case it is a child. */ private LockState executeRollback(Procedure proc) { try { @@ -1617,36 +1599,38 @@ private void yieldProcedure(Procedure proc) { /** * Executes procedure *

- *  <li>Calls the doExecute() of the procedure
- *  <li>If the procedure execution didn't fail (i.e. valid user input)
- *  <ul>
- *    <li>...and returned subprocedures
- *    <ul>
- *      <li>The subprocedures are initialized.
- *      <li>The subprocedures are added to the store
- *      <li>The subprocedures are added to the runnable queue
- *      <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete
- *    </ul>
- *    </li>
- *    <li>...if there are no subprocedure
- *    <ul>
- *      <li>the procedure completed successfully
- *      <li>if there is a parent (WAITING)
- *      <li>the parent state will be set to RUNNABLE
- *    </ul>
- *    </li>
- *  </ul>
- *  </li>
- *  <li>In case of failure
- *  <ul>
- *    <li>The store is updated with the new state</li>
- *    <li>The executor (caller of this method) will start the rollback of the procedure</li>
- *  </ul>
- *  </li>
- *  </ul>
+ * <li>Calls the doExecute() of the procedure
+ * <li>If the procedure execution didn't fail (i.e. valid user input)
+ * <ul>
+ * <li>...and returned subprocedures
+ * <ul>
+ * <li>The subprocedures are initialized.
+ * <li>The subprocedures are added to the store
+ * <li>The subprocedures are added to the runnable queue
+ * <li>The procedure is now in a WAITING state, waiting for the subprocedures to complete
+ * </ul>
+ * </li>
+ * <li>...if there are no subprocedure
+ * <ul>
+ * <li>the procedure completed successfully
+ * <li>if there is a parent (WAITING)
+ * <li>the parent state will be set to RUNNABLE
+ * </ul>
+ * </li>
+ * </ul>
+ * </li>
+ * <li>In case of failure
+ * <ul>
+ * <li>The store is updated with the new state</li>
+ * <li>The executor (caller of this method) will start the rollback of the procedure</li>
+ * </ul>
+ * </li>
+ * </ul>
  • + * */ private void execProcedure(RootProcedureState procStack, Procedure procedure) { Preconditions.checkArgument(procedure.getState() == ProcedureState.RUNNABLE, - "NOT RUNNABLE! " + procedure.toString()); + "NOT RUNNABLE! " + procedure.toString()); // Procedures can suspend themselves. They skip out by throwing a ProcedureSuspendedException. // The exception is caught below and then we hurry to the exit without disturbing state. The @@ -1697,10 +1681,9 @@ private void execProcedure(RootProcedureState procStack, // Yield the current procedure, and make the subprocedure runnable // subprocs may come back 'null'. subprocs = initializeChildren(procStack, procedure, subprocs); - LOG.info("Initialized subprocedures=" + - (subprocs == null? null: - Stream.of(subprocs).map(e -> "{" + e.toString() + "}"). - collect(Collectors.toList()).toString())); + LOG.info("Initialized subprocedures=" + (subprocs == null ? null + : Stream.of(subprocs).map(e -> "{" + e.toString() + "}") + .collect(Collectors.toList()).toString())); } } else if (procedure.getState() == ProcedureState.WAITING_TIMEOUT) { LOG.trace("Added to timeoutExecutor {}", procedure); @@ -1716,8 +1699,8 @@ private void execProcedure(RootProcedureState procStack, // allows to kill the executor before something is stored to the wal. // useful to test the procedure recovery. - if (testing != null && - testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())) { + if (testing != null + && testing.shouldKillBeforeStoreUpdate(suspended, procedure.hasParent())) { kill("TESTING: Kill BEFORE store update: " + procedure); } @@ -1738,8 +1721,8 @@ private void execProcedure(RootProcedureState procStack, return; } // if the procedure is kind enough to pass the slot to someone else, yield - if (procedure.isRunnable() && !suspended && - procedure.isYieldAfterExecutionStep(getEnvironment())) { + if (procedure.isRunnable() && !suspended + && procedure.isYieldAfterExecutionStep(getEnvironment())) { yieldProcedure(procedure); return; } @@ -1785,8 +1768,8 @@ private Procedure[] initializeChildren(RootProcedureState subproc = subprocs[i]; if (subproc == null) { String msg = "subproc[" + i + "] is null, aborting the procedure"; - procedure.setFailure(new RemoteProcedureException(msg, - new IllegalArgumentIOException(msg))); + procedure + .setFailure(new RemoteProcedureException(msg, new IllegalArgumentIOException(msg))); return null; } @@ -1837,8 +1820,8 @@ private void countDownChildren(RootProcedureState procStack, // children have completed, move parent to front of the queue. store.update(parent); scheduler.addFront(parent); - LOG.info("Finished subprocedure pid={}, resume processing ppid={}", - procedure.getProcId(), parent.getProcId()); + LOG.info("Finished subprocedure pid={}, resume processing ppid={}", procedure.getProcId(), + parent.getProcId()); return; } } @@ -1881,10 +1864,10 @@ private void handleInterruptedException(Procedure proc, Interrupte private void execCompletionCleanup(Procedure proc) { final TEnvironment env = getEnvironment(); if (proc.hasLock()) { - LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + - " is finished, even if the holdLock is true, arrive here means we have some holes where" + - " we do not release the lock. 
And the releaseLock below may fail since the procedure may" + - " have already been deleted from the procedure store."); + LOG.warn("Usually this should not happen, we will release the lock before if the procedure" + + " is finished, even if the holdLock is true, arrive here means we have some holes where" + + " we do not release the lock. And the releaseLock below may fail since the procedure may" + + " have already been deleted from the procedure store."); releaseLock(proc, true); } try { @@ -1939,7 +1922,7 @@ public IdLock getProcExecutionLock() { } // ========================================================================== - // Worker Thread + // Worker Thread // ========================================================================== private class WorkerThread extends StoppableThread { private final AtomicLong executionStartTime = new AtomicLong(Long.MAX_VALUE); @@ -1958,6 +1941,7 @@ protected WorkerThread(ThreadGroup group, String prefix) { public void sendStopSignal() { scheduler.signalAll(); } + @Override public void run() { long lastUpdate = EnvironmentEdgeManager.currentTime(); @@ -1984,8 +1968,8 @@ public void run() { procExecutionLock.releaseLockEntry(lockEntry); activeCount = activeExecutorCount.decrementAndGet(); runningCount = store.setRunningProcedureCount(activeCount); - LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), - runningCount, activeCount); + LOG.trace("Halt pid={} runningCount={}, activeCount={}", proc.getProcId(), runningCount, + activeCount); this.activeProcedure = null; lastUpdate = EnvironmentEdgeManager.currentTime(); executionStartTime.set(Long.MAX_VALUE); @@ -2002,7 +1986,7 @@ public void run() { @Override public String toString() { Procedure p = this.activeProcedure; - return getName() + "(pid=" + (p == null? Procedure.NO_PROC_ID: p.getProcId() + ")"); + return getName() + "(pid=" + (p == null ? Procedure.NO_PROC_ID : p.getProcId() + ")"); } /** @@ -2105,12 +2089,11 @@ private void checkThreadCount(final int stuckCount) { } private void refreshConfig() { - addWorkerStuckPercentage = conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, - DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); - timeoutInterval = conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, - DEFAULT_WORKER_MONITOR_INTERVAL); - stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, - DEFAULT_WORKER_STUCK_THRESHOLD); + addWorkerStuckPercentage = + conf.getFloat(WORKER_ADD_STUCK_PERCENTAGE_CONF_KEY, DEFAULT_WORKER_ADD_STUCK_PERCENTAGE); + timeoutInterval = + conf.getInt(WORKER_MONITOR_INTERVAL_CONF_KEY, DEFAULT_WORKER_MONITOR_INTERVAL); + stuckThreshold = conf.getInt(WORKER_STUCK_THRESHOLD_CONF_KEY, DEFAULT_WORKER_STUCK_THRESHOLD); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java index cd65c1f74aed..f8232cce950e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -23,13 +22,10 @@ import org.apache.yetus.audience.InterfaceStability; /** - * Special procedure used as a chore. - * Instead of bringing the Chore class in (dependencies reason), - * we reuse the executor timeout thread for this special case. - * - * The assumption is that procedure is used as hook to dispatch other procedures - * or trigger some cleanups. It does not store state in the ProcedureStore. - * this is just for in-memory chore executions. + * Special procedure used as a chore. Instead of bringing the Chore class in (dependencies reason), + * we reuse the executor timeout thread for this special case. The assumption is that procedure is + * used as hook to dispatch other procedures or trigger some cleanups. It does not store state in + * the ProcedureStore. this is just for in-memory chore executions. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -56,12 +52,10 @@ protected boolean abort(final TEnvironment env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java index 48413928e5b7..f86a2b2d00a5 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureMetrics.java @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.hadoop.hbase.metrics.Counter; @@ -26,12 +25,11 @@ * With this interface, the procedure framework provides means to collect following set of metrics * per procedure type for all procedures: *
- *   <li>Count of submitted procedure instances</li>
- *   <li>Time histogram for successfully completed procedure instances</li>
- *   <li>Count of failed procedure instances</li>
- * </ul>
- *
- * Please implement this interface to return appropriate metrics.
+ * <li>Count of submitted procedure instances</li>
+ * <li>Time histogram for successfully completed procedure instances</li>
+ * <li>Count of failed procedure instances</li>
+ * </ul>
  • + * + * Please implement this interface to return appropriate metrics. */ @InterfaceAudience.Private public interface ProcedureMetrics { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java index 72b2b284ca19..53a3714f268e 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,8 +38,8 @@ public interface ProcedureScheduler { void stop(); /** - * In case the class is blocking on poll() waiting for items to be added, - * this method should awake poll() and poll() should return. + * In case the class is blocking on poll() waiting for items to be added, this method should awake + * poll() and poll() should return. */ void signalAll(); @@ -75,15 +75,14 @@ public interface ProcedureScheduler { void addBack(Procedure proc, boolean notify); /** - * The procedure can't run at the moment. - * add it back to the queue, giving priority to someone else. + * The procedure can't run at the moment. add it back to the queue, giving priority to someone + * else. * @param proc the Procedure to add back to the list */ void yield(Procedure proc); /** - * The procedure in execution completed. - * This can be implemented to perform cleanups. + * The procedure in execution completed. This can be implemented to perform cleanups. * @param proc the Procedure that completed the execution. */ void completionCleanup(Procedure proc); @@ -126,9 +125,9 @@ public interface ProcedureScheduler { int size(); /** - * Clear current state of scheduler such that it is equivalent to newly created scheduler. - * Used for testing failure and recovery. To emulate server crash/restart, - * {@link ProcedureExecutor} resets its own state and calls clear() on scheduler. + * Clear current state of scheduler such that it is equivalent to newly created scheduler. Used + * for testing failure and recovery. To emulate server crash/restart, {@link ProcedureExecutor} + * resets its own state and calls clear() on scheduler. 
*/ void clear(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java index 216022f1c798..fc4eb1532ee4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureStateSerializer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.hbase.thirdparty.com.google.protobuf.Message; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java index 9f521214f075..95fafae72665 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureSuspendedException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java index c557c2021b40..ebf7c922bde4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -46,10 +46,11 @@ */ @InterfaceAudience.Private public final class ProcedureUtil { - private ProcedureUtil() { } + private ProcedureUtil() { + } // ========================================================================== - // Reflection helpers to create/validate a Procedure object + // Reflection helpers to create/validate a Procedure object // ========================================================================== private static Procedure newProcedure(String className) throws BadProcedureException { try { @@ -67,8 +68,8 @@ private static Procedure newProcedure(String className) throws BadProcedureEx return ctor.newInstance(); } catch (Exception e) { throw new BadProcedureException( - "The procedure class " + className + " must be accessible and have an empty constructor", - e); + "The procedure class " + className + " must be accessible and have an empty constructor", + e); } } @@ -85,18 +86,18 @@ static void validateClass(Procedure proc) throws BadProcedureException { throw new Exception("the " + clazz + " constructor is not public"); } } catch (Exception e) { - throw new BadProcedureException("The procedure class " + proc.getClass().getName() + - " must be accessible and have an empty constructor", e); + throw new BadProcedureException("The procedure class " + proc.getClass().getName() + + " must be accessible and have an empty constructor", e); } } // ========================================================================== - // convert to and from Procedure object + // convert to and from Procedure object // ========================================================================== /** - * A serializer for our Procedures. Instead of the previous serializer, it - * uses the stateMessage list to store the internal state of the Procedures. + * A serializer for our Procedures. Instead of the previous serializer, it uses the stateMessage + * list to store the internal state of the Procedures. */ private static class StateSerializer implements ProcedureStateSerializer { private final ProcedureProtos.Procedure.Builder builder; @@ -113,8 +114,7 @@ public void serialize(Message message) throws IOException { } @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { if (deserializeIndex >= builder.getStateMessageCount()) { throw new IOException("Invalid state message index: " + deserializeIndex); } @@ -129,8 +129,8 @@ public M deserialize(Class clazz) } /** - * A serializer (deserializer) for those Procedures which were serialized - * before this patch. It deserializes the old, binary stateData field. + * A serializer (deserializer) for those Procedures which were serialized before this patch. It + * deserializes the old, binary stateData field. 
*/ private static class CompatStateSerializer implements ProcedureStateSerializer { private InputStream inputStream; @@ -146,8 +146,7 @@ public void serialize(Message message) throws IOException { @SuppressWarnings("unchecked") @Override - public M deserialize(Class clazz) - throws IOException { + public M deserialize(Class clazz) throws IOException { Parser parser = (Parser) Internal.getDefaultInstance(clazz).getParserForType(); try { return parser.parseDelimitedFrom(inputStream); @@ -167,12 +166,10 @@ public static ProcedureProtos.Procedure convertToProtoProcedure(Procedure pro Preconditions.checkArgument(proc != null); validateClass(proc); - final ProcedureProtos.Procedure.Builder builder = ProcedureProtos.Procedure.newBuilder() - .setClassName(proc.getClass().getName()) - .setProcId(proc.getProcId()) - .setState(proc.getState()) - .setSubmittedTime(proc.getSubmittedTime()) - .setLastUpdate(proc.getLastUpdate()); + final ProcedureProtos.Procedure.Builder builder = + ProcedureProtos.Procedure.newBuilder().setClassName(proc.getClass().getName()) + .setProcId(proc.getProcId()).setState(proc.getState()) + .setSubmittedTime(proc.getSubmittedTime()).setLastUpdate(proc.getLastUpdate()); if (proc.hasParent()) { builder.setParentId(proc.getParentProcId()); @@ -259,9 +256,9 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) } if (proto.hasException()) { - assert proc.getState() == ProcedureProtos.ProcedureState.FAILED || - proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK : - "The procedure must be failed (waiting to rollback) or rolledback"; + assert proc.getState() == ProcedureProtos.ProcedureState.FAILED + || proc.getState() == ProcedureProtos.ProcedureState.ROLLEDBACK + : "The procedure must be failed (waiting to rollback) or rolledback"; proc.setFailure(RemoteProcedureException.fromProto(proto.getException())); } @@ -298,11 +295,11 @@ public static Procedure convertToProcedure(ProcedureProtos.Procedure proto) } // ========================================================================== - // convert from LockedResource object + // convert from LockedResource object // ========================================================================== - public static LockServiceProtos.LockedResourceType convertToProtoResourceType( - LockedResourceType resourceType) { + public static LockServiceProtos.LockedResourceType + convertToProtoResourceType(LockedResourceType resourceType) { return LockServiceProtos.LockedResourceType.valueOf(resourceType.name()); } @@ -310,13 +307,12 @@ public static LockServiceProtos.LockType convertToProtoLockType(LockType lockTyp return LockServiceProtos.LockType.valueOf(lockType.name()); } - public static LockServiceProtos.LockedResource convertToProtoLockedResource( - LockedResource lockedResource) throws IOException { + public static LockServiceProtos.LockedResource + convertToProtoLockedResource(LockedResource lockedResource) throws IOException { LockServiceProtos.LockedResource.Builder builder = LockServiceProtos.LockedResource.newBuilder(); - builder - .setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) + builder.setResourceType(convertToProtoResourceType(lockedResource.getResourceType())) .setResourceName(lockedResource.getResourceName()) .setLockType(convertToProtoLockType(lockedResource.getLockType())); @@ -331,8 +327,7 @@ public static LockServiceProtos.LockedResource convertToProtoLockedResource( builder.setSharedLockCount(lockedResource.getSharedLockCount()); for (Procedure waitingProcedure : 
lockedResource.getWaitingProcedures()) { - ProcedureProtos.Procedure waitingProcedureProto = - convertToProtoProcedure(waitingProcedure); + ProcedureProtos.Procedure waitingProcedureProto = convertToProtoProcedure(waitingProcedure); builder.addWaitingProcedures(waitingProcedureProto); } @@ -340,17 +335,17 @@ public static LockServiceProtos.LockedResource convertToProtoLockedResource( } public static final String PROCEDURE_RETRY_SLEEP_INTERVAL_MS = - "hbase.procedure.retry.sleep.interval.ms"; + "hbase.procedure.retry.sleep.interval.ms"; // default to 1 second public static final long DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS = 1000; public static final String PROCEDURE_RETRY_MAX_SLEEP_TIME_MS = - "hbase.procedure.retry.max.sleep.time.ms"; + "hbase.procedure.retry.max.sleep.time.ms"; // default to 10 minutes public static final long DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS = - TimeUnit.MINUTES.toMillis(10); + TimeUnit.MINUTES.toMillis(10); /** * Get a retry counter for getting the backoff time. We will use the @@ -363,11 +358,11 @@ public static LockServiceProtos.LockedResource convertToProtoLockedResource( */ public static RetryCounter createRetryCounter(Configuration conf) { long sleepIntervalMs = - conf.getLong(PROCEDURE_RETRY_SLEEP_INTERVAL_MS, DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS); + conf.getLong(PROCEDURE_RETRY_SLEEP_INTERVAL_MS, DEFAULT_PROCEDURE_RETRY_SLEEP_INTERVAL_MS); long maxSleepTimeMs = - conf.getLong(PROCEDURE_RETRY_MAX_SLEEP_TIME_MS, DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS); + conf.getLong(PROCEDURE_RETRY_MAX_SLEEP_TIME_MS, DEFAULT_PROCEDURE_RETRY_MAX_SLEEP_TIME_MS); RetryConfig retryConfig = new RetryConfig().setSleepInterval(sleepIntervalMs) - .setMaxSleepTime(maxSleepTimeMs).setBackoffPolicy(new ExponentialBackoffPolicyWithLimit()); + .setMaxSleepTime(maxSleepTimeMs).setBackoffPolicy(new ExponentialBackoffPolicyWithLimit()); return new RetryCounter(retryConfig); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java index 0487ac5b094a..bf78916c48f1 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/ProcedureYieldException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java index 296b97b000fd..d219ae121ff4 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureDispatcher.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; @@ -36,22 +35,23 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Threads; -import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hbase.thirdparty.com.google.common.collect.ArrayListMultimap; +import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** - * A procedure dispatcher that aggregates and sends after elapsed time or after we hit - * count threshold. Creates its own threadpool to run RPCs with timeout. + * A procedure dispatcher that aggregates and sends after elapsed time or after we hit count + * threshold. Creates its own threadpool to run RPCs with timeout. *
<ul>
* <li>Each server queue has a dispatch buffer</li>
- * <li>Once the dispatch buffer reaches a threshold-size/time we send<li>
+ * <li>Once the dispatch buffer reaches a threshold-size/time we send
+ * <li>
* </ul>
- * <p>Call {@link #start()} and then {@link #submitTask(Runnable)}. When done,
- * call {@link #stop()}.
+ * <p>
    + * Call {@link #start()} and then {@link #submitTask(Runnable)}. When done, call {@link #stop()}. */ @InterfaceAudience.Private public abstract class RemoteProcedureDispatcher> { @@ -92,8 +92,9 @@ public boolean start() { return false; } - LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " + - "operationDelay={}", this.corePoolSize, this.queueMaxSize, this.operationDelay); + LOG.info("Instantiated, coreThreads={} (allowCoreThreadTimeOut=true), queueMaxSize={}, " + + "operationDelay={}", + this.corePoolSize, this.queueMaxSize, this.operationDelay); // Create the timeout executor timeoutExecutor = new TimeoutExecutorThread(); @@ -102,7 +103,7 @@ public boolean start() { // Create the thread pool that will execute RPCs threadPool = Threads.getBoundedCachedThreadPool(corePoolSize, 60L, TimeUnit.SECONDS, new ThreadFactoryBuilder().setNameFormat(this.getClass().getSimpleName() + "-pool-%d") - .setDaemon(true).setUncaughtExceptionHandler(getUncaughtExceptionHandler()).build()); + .setDaemon(true).setUncaughtExceptionHandler(getUncaughtExceptionHandler()).build()); return true; } @@ -144,14 +145,14 @@ public void join() { protected abstract UncaughtExceptionHandler getUncaughtExceptionHandler(); // ============================================================================================ - // Node Helpers + // Node Helpers // ============================================================================================ /** * Add a node that will be able to execute remote procedures * @param key the node identifier */ public void addNode(final TRemote key) { - assert key != null: "Tried to add a node with a null key"; + assert key != null : "Tried to add a node with a null key"; final BufferNode newNode = new BufferNode(key); nodeMap.putIfAbsent(key, newNode); } @@ -161,8 +162,7 @@ public void addNode(final TRemote key) { * @param key the node identifier */ public void addOperationToNode(final TRemote key, RemoteProcedure rp) - throws NullTargetServerDispatchException, NoServerDispatchException, - NoNodeDispatchException { + throws NullTargetServerDispatchException, NoServerDispatchException, NoNodeDispatchException { if (key == null) { throw new NullTargetServerDispatchException(rp.toString()); } @@ -204,7 +204,7 @@ public boolean removeNode(final TRemote key) { } // ============================================================================================ - // Task Helpers + // Task Helpers // ============================================================================================ protected final void submitTask(Runnable task) { threadPool.execute(task); @@ -215,6 +215,7 @@ protected final void submitTask(Runnable task, long delay, TimeUnit unit) { } protected abstract void remoteDispatch(TRemote key, Set operations); + protected abstract void abortPendingOperations(TRemote key, Set operations); /** @@ -237,11 +238,11 @@ public RemoteProcedure getRemoteProcedure() { */ public interface RemoteProcedure { /** - * For building the remote operation. - * May be empty if no need to send remote call. Usually, this means the RemoteProcedure has been - * finished already. This is possible, as we may have already sent the procedure to RS but then - * the rpc connection is broken so the executeProcedures call fails, but the RS does receive the - * procedure and execute it and then report back, before we retry again. + * For building the remote operation. May be empty if no need to send remote call. 
Usually, this + * means the RemoteProcedure has been finished already. This is possible, as we may have already + * sent the procedure to RS but then the rpc connection is broken so the executeProcedures call + * fails, but the RS does receive the procedure and execute it and then report back, before we + * retry again. */ Optional remoteCallBuild(TEnv env, TRemote remote); @@ -263,9 +264,8 @@ public interface RemoteProcedure { void remoteOperationFailed(TEnv env, RemoteProcedureException error); /** - * Whether store this remote procedure in dispatched queue - * only OpenRegionProcedure and CloseRegionProcedure return false since they are - * not fully controlled by dispatcher + * Whether store this remote procedure in dispatched queue only OpenRegionProcedure and + * CloseRegionProcedure return false since they are not fully controlled by dispatcher */ default boolean storeInDispatchedQueue() { return true; @@ -295,11 +295,11 @@ protected ArrayListMultimap, RemoteOperation> buildAndGroupRequestByTyp protected List fetchType( final ArrayListMultimap, RemoteOperation> requestByType, final Class type) { - return (List)requestByType.removeAll(type); + return (List) requestByType.removeAll(type); } // ============================================================================================ - // Timeout Helpers + // Timeout Helpers // ============================================================================================ private final class TimeoutExecutorThread extends Thread { private final DelayQueue queue = new DelayQueue(); @@ -311,13 +311,14 @@ public TimeoutExecutorThread() { @Override public void run() { while (running.get()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, - 20, TimeUnit.SECONDS); + final DelayedWithTimeout task = + DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { if (task == null && queue.size() > 0) { LOG.error("DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting" - + " elapsed. If this is repeated consistently, it means no element is getting expired" - + " from the queue and it might freeze the system. Queue: {}", queue); + + " elapsed. If this is repeated consistently, it means no element is getting expired" + + " from the queue and it might freeze the system. 
Queue: {}", + queue); } // the executor may be shutting down, and the task is just the shutdown request continue; @@ -349,8 +350,8 @@ public void awaitTermination() { sendStopSignal(); join(250); if (i > 0 && (i % 8) == 0) { - LOG.warn("Waiting termination of thread " + getName() + ", " + - StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); + LOG.warn("Waiting termination of thread " + getName() + ", " + + StringUtils.humanTimeDiff(EnvironmentEdgeManager.currentTime() - startTime)); } } } catch (InterruptedException e) { @@ -360,7 +361,7 @@ public void awaitTermination() { } // ============================================================================================ - // Internals Helpers + // Internals Helpers // ============================================================================================ /** @@ -413,7 +414,7 @@ public synchronized void abortOperationsInQueue() { this.dispatchedOperations.clear(); } - public synchronized void operationCompleted(final RemoteProcedure remoteProcedure){ + public synchronized void operationCompleted(final RemoteProcedure remoteProcedure) { this.dispatchedOperations.remove(remoteProcedure); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java index 91ad920f27f8..7d9dd6deb4c7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RemoteProcedureException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.hadoop.hbase.util.ForeignExceptionUtil; import org.apache.hadoop.ipc.RemoteException; import org.apache.yetus.audience.InterfaceAudience; @@ -32,23 +31,23 @@ * RemoteProcedureExceptions are sent to 'remote' peers to signal an abort in the face of failures. * When serialized for transmission we encode using Protobufs to ensure version compatibility. *
<p>
    - * RemoteProcedureException exceptions contain a Throwable as its cause. - * This can be a "regular" exception generated locally or a ProxyThrowable that is a representation - * of the original exception created on original 'remote' source. These ProxyThrowables have their - * their stacks traces and messages overridden to reflect the original 'remote' exception. + * RemoteProcedureException exceptions contain a Throwable as its cause. This can be a "regular" + * exception generated locally or a ProxyThrowable that is a representation of the original + * exception created on original 'remote' source. These ProxyThrowables have their their stacks + * traces and messages overridden to reflect the original 'remote' exception. */ @InterfaceAudience.Private @InterfaceStability.Evolving @SuppressWarnings("serial") public class RemoteProcedureException extends ProcedureException { /** - * Name of the throwable's source such as a host or thread name. Must be non-null. + * Name of the throwable's source such as a host or thread name. Must be non-null. */ private final String source; /** - * Create a new RemoteProcedureException that can be serialized. - * It is assumed that this came form a local source. + * Create a new RemoteProcedureException that can be serialized. It is assumed that this came form + * a local source. * @param source the host or thread name of the source * @param cause the actual cause of the exception */ @@ -66,10 +65,10 @@ public String getSource() { public Exception unwrapRemoteException() { final Throwable cause = getCause(); if (cause instanceof RemoteException) { - return ((RemoteException)cause).unwrapRemoteException(); + return ((RemoteException) cause).unwrapRemoteException(); } if (cause instanceof Exception) { - return (Exception)cause; + return (Exception) cause; } return new Exception(cause); } @@ -81,7 +80,7 @@ public Exception unwrapRemoteException() { public IOException unwrapRemoteIOException() { final Exception cause = unwrapRemoteException(); if (cause instanceof IOException) { - return (IOException)cause; + return (IOException) cause; } return new IOException(cause); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java index 440f9e7d6ec1..9990bdeb4306 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/RootProcedureState.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -29,15 +29,11 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; /** - * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". - * A "Root Procedure" is a Procedure without parent, each subprocedure will be - * added to the "Root Procedure" stack (or rollback-stack). - * - * RootProcedureState is used and managed only by the ProcedureExecutor. - * Long rootProcId = getRootProcedureId(proc); - * rollbackStack.get(rootProcId).acquire(proc) - * rollbackStack.get(rootProcId).release(proc) - * ... + * Internal state of the ProcedureExecutor that describes the state of a "Root Procedure". 
A "Root + * Procedure" is a Procedure without parent, each subprocedure will be added to the "Root Procedure" + * stack (or rollback-stack). RootProcedureState is used and managed only by the ProcedureExecutor. + * Long rootProcId = getRootProcedureId(proc); rollbackStack.get(rootProcId).acquire(proc) + * rollbackStack.get(rootProcId).release(proc) ... */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -45,9 +41,9 @@ class RootProcedureState { private static final Logger LOG = LoggerFactory.getLogger(RootProcedureState.class); private enum State { - RUNNING, // The Procedure is running or ready to run - FAILED, // The Procedure failed, waiting for the rollback executing - ROLLINGBACK, // The Procedure failed and the execution was rolledback + RUNNING, // The Procedure is running or ready to run + FAILED, // The Procedure failed, waiting for the rollback executing + ROLLINGBACK, // The Procedure failed and the execution was rolledback } private Set> subprocs = null; @@ -102,7 +98,7 @@ protected synchronized List> getSubproceduresStack() { protected synchronized RemoteProcedureException getException() { if (subprocStack != null) { - for (Procedure proc: subprocStack) { + for (Procedure proc : subprocStack) { if (proc.hasException()) { return proc.getException(); } @@ -137,8 +133,8 @@ protected synchronized void abort() { } /** - * Called by the ProcedureExecutor after the procedure step is completed, - * to add the step to the rollback list (or procedure stack) + * Called by the ProcedureExecutor after the procedure step is completed, to add the step to the + * rollback list (or procedure stack) */ protected synchronized void addRollbackStep(Procedure proc) { if (proc.isFailed()) { @@ -163,11 +159,10 @@ protected synchronized void addSubProcedure(Procedure proc) { } /** - * Called on store load by the ProcedureExecutor to load part of the stack. - * - * Each procedure has its own stack-positions. Which means we have to write - * to the store only the Procedure we executed, and nothing else. - * on load we recreate the full stack by aggregating each procedure stack-positions. + * Called on store load by the ProcedureExecutor to load part of the stack. Each procedure has its + * own stack-positions. Which means we have to write to the store only the Procedure we executed, + * and nothing else. on load we recreate the full stack by aggregating each procedure + * stack-positions. */ protected synchronized void loadStack(Procedure proc) { addSubProcedure(proc); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java index 20abf651e306..d40986b1e12b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SequentialProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,24 +15,25 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.SequentialProcedureData; /** * A SequentialProcedure describes one step in a procedure chain: + * *

* <pre>
 *   -> Step 1 -> Step 2 -> Step 3
 * </pre>
    - * The main difference from a base Procedure is that the execute() of a - * SequentialProcedure will be called only once; there will be no second - * execute() call once the children are finished. which means once the child - * of a SequentialProcedure are completed the SequentialProcedure is completed too. + * + * The main difference from a base Procedure is that the execute() of a SequentialProcedure will be + * called only once; there will be no second execute() call once the children are finished. which + * means once the child of a SequentialProcedure are completed the SequentialProcedure is completed + * too. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -53,8 +54,7 @@ protected Procedure[] doExecute(final TEnvironment env) } @Override - protected void doRollback(final TEnvironment env) - throws IOException, InterruptedException { + protected void doRollback(final TEnvironment env) throws IOException, InterruptedException { updateTimestamp(); if (executed) { try { @@ -67,16 +67,14 @@ protected void doRollback(final TEnvironment env) } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { SequentialProcedureData.Builder data = SequentialProcedureData.newBuilder(); data.setExecuted(executed); serializer.serialize(data.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { SequentialProcedureData data = serializer.deserialize(SequentialProcedureData.class); executed = data.getExecuted(); } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java index 2b043d472d0e..f2b4d4820da7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/SimpleProcedureScheduler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import java.util.Collections; @@ -80,8 +79,7 @@ public List getLocks() { } @Override - public LockedResource getLockResource(LockedResourceType resourceType, - String resourceName) { + public LockedResource getLockResource(LockedResourceType resourceType, String resourceName) { return null; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java index 797381db4e24..5503eaa2f254 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; @@ -32,19 +31,17 @@ /** * Procedure described by a series of steps. - * - *
<p>
    The procedure implementor must have an enum of 'states', describing - * the various step of the procedure. - * Once the procedure is running, the procedure-framework will call executeFromState() - * using the 'state' provided by the user. The first call to executeFromState() - * will be performed with 'state = null'. The implementor can jump between - * states using setNextState(MyStateEnum.ordinal()). - * The rollback will call rollbackState() for each state that was executed, in reverse order. + *
<p>
    + * The procedure implementor must have an enum of 'states', describing the various step of the + * procedure. Once the procedure is running, the procedure-framework will call executeFromState() + * using the 'state' provided by the user. The first call to executeFromState() will be performed + * with 'state = null'. The implementor can jump between states using + * setNextState(MyStateEnum.ordinal()). The rollback will call rollbackState() for each state that + * was executed, in reverse order. */ @InterfaceAudience.Private @InterfaceStability.Evolving -public abstract class StateMachineProcedure - extends Procedure { +public abstract class StateMachineProcedure extends Procedure { private static final Logger LOG = LoggerFactory.getLogger(StateMachineProcedure.class); private static final int EOF_STATE = Integer.MIN_VALUE; @@ -72,18 +69,17 @@ protected final int getCycles() { private int previousState; public enum Flow { - HAS_MORE_STATE, - NO_MORE_STATE, + HAS_MORE_STATE, NO_MORE_STATE, } /** * called to perform a single step of the specified 'state' of the procedure * @param state state to execute - * @return Flow.NO_MORE_STATE if the procedure is completed, - * Flow.HAS_MORE_STATE if there is another step. + * @return Flow.NO_MORE_STATE if the procedure is completed, Flow.HAS_MORE_STATE if there is + * another step. */ protected abstract Flow executeFromState(TEnvironment env, TState state) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException; /** * called to perform the rollback of the specified state @@ -91,7 +87,7 @@ protected abstract Flow executeFromState(TEnvironment env, TState state) * @throws IOException temporary failure, the rollback will retry later */ protected abstract void rollbackState(TEnvironment env, TState state) - throws IOException, InterruptedException; + throws IOException, InterruptedException; /** * Convert an ordinal (or state id) to an Enum (or more descriptive) state object. @@ -123,9 +119,9 @@ protected void setNextState(final TState state) { } /** - * By default, the executor will try ro run all the steps of the procedure start to finish. - * Return true to make the executor yield between execution steps to - * give other procedures time to run their steps. + * By default, the executor will try ro run all the steps of the procedure start to finish. Return + * true to make the executor yield between execution steps to give other procedures time to run + * their steps. * @param state the state we are going to execute next. * @return Return true if the executor should yield before the execution of the specified step. * Defaults to return false. @@ -138,8 +134,8 @@ protected boolean isYieldBeforeExecuteFromState(TEnvironment env, TState state) * Add a child procedure to execute * @param subProcedure the child procedure */ - protected > void addChildProcedure( - @SuppressWarnings("unchecked") T... subProcedure) { + protected > void + addChildProcedure(@SuppressWarnings("unchecked") T... 
subProcedure) { if (subProcedure == null) { return; } @@ -162,7 +158,7 @@ protected > void addChildProcedure( @Override protected Procedure[] execute(final TEnvironment env) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { updateTimestamp(); try { failIfAborted(); @@ -177,7 +173,7 @@ protected Procedure[] execute(final TEnvironment env) } if (LOG.isTraceEnabled()) { - LOG.trace(state + " " + this + "; cycles=" + this.cycles); + LOG.trace(state + " " + this + "; cycles=" + this.cycles); } // Keep running count of cycles if (getStateId(state) != this.previousState) { @@ -198,15 +194,14 @@ protected Procedure[] execute(final TEnvironment env) subProcList = null; return subProcedures; } - return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] {this}; + return (isWaiting() || isFailed() || !hasMoreState()) ? null : new Procedure[] { this }; } finally { updateTimestamp(); } } @Override - protected void rollback(final TEnvironment env) - throws IOException, InterruptedException { + protected void rollback(final TEnvironment env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -221,7 +216,7 @@ protected void rollback(final TEnvironment env) } protected boolean isEofState() { - return stateCount > 0 && states[stateCount-1] == EOF_STATE; + return stateCount > 0 && states[stateCount - 1] == EOF_STATE; } @Override @@ -254,8 +249,8 @@ protected final void failIfAborted() { } /** - * Used by the default implementation of abort() to know if the current state can be aborted - * and rollback can be triggered. + * Used by the default implementation of abort() to know if the current state can be aborted and + * rollback can be triggered. */ protected boolean isRollbackSupported(final TState state) { return false; @@ -271,7 +266,7 @@ private boolean hasMoreState() { } protected TState getCurrentState() { - return stateCount > 0 ? getState(states[stateCount-1]) : getInitialState(); + return stateCount > 0 ? 
getState(states[stateCount - 1]) : getInitialState(); } /** @@ -308,8 +303,7 @@ protected void toStringState(StringBuilder builder) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData.Builder data = StateMachineProcedureData.newBuilder(); for (int i = 0; i < stateCount; ++i) { data.addState(states[i]); @@ -318,8 +312,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { StateMachineProcedureData data = serializer.deserialize(StateMachineProcedureData.class); stateCount = data.getStateCount(); if (stateCount > 0) { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java index b58b571a9345..4d0d8941dedf 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/StoppableThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java index fc917b6f36ed..a95fba7160ad 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TimeoutExecutorThread.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -39,7 +39,7 @@ class TimeoutExecutorThread extends StoppableThread { private final DelayQueue queue = new DelayQueue<>(); public TimeoutExecutorThread(ProcedureExecutor executor, ThreadGroup group, - String name) { + String name) { super(group, name); setDaemon(true); this.executor = executor; @@ -53,8 +53,7 @@ public void sendStopSignal() { @Override public void run() { while (executor.isRunning()) { - final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, - TimeUnit.SECONDS); + final DelayedWithTimeout task = DelayedUtil.takeWithoutInterrupt(queue, 20, TimeUnit.SECONDS); if (task == null || task == DelayedUtil.DELAYED_POISON) { // the executor may be shutting down, // and the task is just the shutdown request diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java index b8ddad21866e..7a15ebfc494c 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/TwoPhaseProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.procedure2; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java index aba71b95d6da..ec0915063185 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/InMemoryProcedureIterator.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java index 7a9ea1b0d314..9d668dc1688b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/LeaseRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java index 8fbc1473ed7e..8a4dd403cd20 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/NoopProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java index c1eaa73230fc..1010e0e5dc18 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.procedure2.store; import java.io.IOException; - import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -85,9 +84,8 @@ public interface ProcedureIterator { void reset(); /** - * Returns true if the iterator has more elements. 
- * (In other words, returns true if next() would return a Procedure - * rather than throwing an exception.) + * Returns true if the iterator has more elements. (In other words, returns true if next() would + * return a Procedure rather than throwing an exception.) * @return true if the iterator has more procedures */ boolean hasNext(); @@ -135,8 +133,8 @@ public interface ProcedureLoader { void load(ProcedureIterator procIter) throws IOException; /** - * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to - * the executor, which probably means they are corrupted since some information/link is missing. + * Called by the ProcedureStore.load() in case we have procedures not-ready to be added to the + * executor, which probably means they are corrupted since some information/link is missing. * @param procIter iterator over the procedures not ready to be added to the executor, corrupted */ void handleCorrupted(ProcedureIterator procIter) throws IOException; @@ -178,8 +176,8 @@ public interface ProcedureLoader { int getNumThreads(); /** - * Set the number of procedure running. - * This can be used, for example, by the store to know how long to wait before a sync. + * Set the number of procedure running. This can be used, for example, by the store to know how + * long to wait before a sync. * @return how many procedures are running (may not be same as count). */ int setRunningProcedureCount(int count); @@ -201,54 +199,45 @@ public interface ProcedureLoader { void load(ProcedureLoader loader) throws IOException; /** - * When a procedure is submitted to the executor insert(proc, null) will be called. - * 'proc' has a 'RUNNABLE' state and the initial information required to start up. - * - * When a procedure is executed and it returns children insert(proc, subprocs) will be called. - * 'proc' has a 'WAITING' state and an update state. - * 'subprocs' are the children in 'RUNNABLE' state with the initial information. - * + * When a procedure is submitted to the executor insert(proc, null) will be called. 'proc' has a + * 'RUNNABLE' state and the initial information required to start up. When a procedure is executed + * and it returns children insert(proc, subprocs) will be called. 'proc' has a 'WAITING' state and + * an update state. 'subprocs' are the children in 'RUNNABLE' state with the initial information. * @param proc the procedure to serialize and write to the store. * @param subprocs the newly created child of the proc. */ void insert(Procedure proc, Procedure[] subprocs); /** - * Serialize a set of new procedures. - * These procedures are freshly submitted to the executor and each procedure - * has a 'RUNNABLE' state and the initial information required to start up. - * + * Serialize a set of new procedures. These procedures are freshly submitted to the executor and + * each procedure has a 'RUNNABLE' state and the initial information required to start up. * @param procs the procedures to serialize and write to the store. */ void insert(Procedure[] procs); /** - * The specified procedure was executed, - * and the new state should be written to the store. + * The specified procedure was executed, and the new state should be written to the store. * @param proc the procedure to serialize and write to the store. */ void update(Procedure proc); /** - * The specified procId was removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procId. 
+ * The specified procId was removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procId. * @param procId the ID of the procedure to remove. */ void delete(long procId); /** - * The parent procedure completed. - * Update the state and mark all the child deleted. + * The parent procedure completed. Update the state and mark all the child deleted. * @param parentProc the parent procedure to serialize and write to the store. * @param subProcIds the IDs of the sub-procedure to remove. */ void delete(Procedure parentProc, long[] subProcIds); /** - * The specified procIds were removed from the executor, - * due to completion, abort or failure. - * The store implementor should remove all the information about the specified procIds. + * The specified procIds were removed from the executor, due to completion, abort or failure. The + * store implementor should remove all the information about the specified procIds. * @param procIds the IDs of the procedures to remove. * @param offset the array offset from where to start to delete * @param count the number of IDs to delete diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java index a5c04fab200c..be0a148d6379 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStoreBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -27,14 +27,13 @@ @InterfaceAudience.Private public abstract class ProcedureStoreBase implements ProcedureStore { private final CopyOnWriteArrayList listeners = - new CopyOnWriteArrayList<>(); + new CopyOnWriteArrayList<>(); private final AtomicBoolean running = new AtomicBoolean(false); /** - * Change the state to 'isRunning', - * returns true if the store state was changed, - * false if the store was already in that state. + * Change the state to 'isRunning', returns true if the store state was changed, false if the + * store was already in that state. * @param isRunning the state to set. * @return true if the store state was changed, otherwise false. */ diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java index 4e615b971d8a..7fe234a0f5b7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -86,7 +86,7 @@ private ProcedureTree(Map procMap) { } checkOrphan(procMap); Comparator cmp = - (p1, p2) -> Long.compare(p1.getProto().getProcId(), p2.getProto().getProcId()); + (p1, p2) -> Long.compare(p1.getProto().getProcId(), p2.getProto().getProcId()); Collections.sort(validProcs, cmp); Collections.sort(corruptedProcs, cmp); } @@ -109,7 +109,7 @@ private List buildTree(Map procMap) { } private void collectStackId(Entry entry, Map> stackId2Proc, - MutableInt maxStackId) { + MutableInt maxStackId) { if (LOG.isDebugEnabled()) { LOG.debug("Procedure {} stack ids={}", entry, entry.proc.getStackIdList()); } @@ -124,7 +124,7 @@ private void collectStackId(Entry entry, Map> stackId2Proc, } private void addAllToCorruptedAndRemoveFromProcMap(Entry entry, - Map remainingProcMap) { + Map remainingProcMap) { corruptedProcs.add(new ProtoAndProcedure(entry.proc)); remainingProcMap.remove(entry.proc.getProcId()); for (Entry e : entry.subProcs) { @@ -167,8 +167,9 @@ private void checkReady(Entry rootEntry, Map remainingProcMap) { rootEntry); valid = false; } else if (entries.size() > 1) { - LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + - " root procedure is {}", entries, i, maxStackId, rootEntry); + LOG.error("Multiple procedures {} have the same stack id {}, max stack id is {}," + + " root procedure is {}", + entries, i, maxStackId, rootEntry); valid = false; } } diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java index 0cdc48041003..593f1f967e3f 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/ProtoAndProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java index 98416a527b8e..fc82cc88727b 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/BitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -271,7 +271,7 @@ public void unsetPartialFlag() { */ public ProcedureProtos.ProcedureStoreTracker.TrackerNode convert() { ProcedureProtos.ProcedureStoreTracker.TrackerNode.Builder builder = - ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); + ProcedureProtos.ProcedureStoreTracker.TrackerNode.newBuilder(); builder.setStartId(start); for (int i = 0; i < modified.length; ++i) { builder.addUpdated(modified[i]); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java index dc9d16c41f8e..a47b2664a9e7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/CorruptedWALProcedureStoreException.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java index 3436e8b76697..dc33ab8d91a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,10 +32,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; /** - * Keeps track of live procedures. - * - * It can be used by the ProcedureStore to identify which procedures are already - * deleted/completed to avoid the deserialization step on restart + * Keeps track of live procedures. It can be used by the ProcedureStore to identify which procedures + * are already deleted/completed to avoid the deserialization step on restart * @deprecated Since 2.3.0, will be removed in 4.0.0. Keep here only for rolling upgrading, now we * use the new region based procedure store. */ @@ -48,29 +46,30 @@ class ProcedureStoreTracker { private final TreeMap map = new TreeMap<>(); /** - * If true, do not remove bits corresponding to deleted procedures. Note that this can result - * in huge bitmaps overtime. - * Currently, it's set to true only when building tracker state from logs during recovery. During - * recovery, if we are sure that a procedure has been deleted, reading its old update entries - * can be skipped. + * If true, do not remove bits corresponding to deleted procedures. Note that this can result in + * huge bitmaps overtime. Currently, it's set to true only when building tracker state from logs + * during recovery. During recovery, if we are sure that a procedure has been deleted, reading its + * old update entries can be skipped. */ private boolean keepDeletes = false; /** - * If true, it means tracker has incomplete information about the active/deleted procedures. - * It's set to true only when recovering from old logs. 
See {@link #isDeleted(long)} docs to - * understand it's real use. + * If true, it means tracker has incomplete information about the active/deleted procedures. It's + * set to true only when recovering from old logs. See {@link #isDeleted(long)} docs to understand + * it's real use. */ boolean partial = false; private long minModifiedProcId = Long.MAX_VALUE; private long maxModifiedProcId = Long.MIN_VALUE; - public enum DeleteState { YES, NO, MAYBE } + public enum DeleteState { + YES, NO, MAYBE + } public void resetToProto(ProcedureProtos.ProcedureStoreTracker trackerProtoBuf) { reset(); - for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : - trackerProtoBuf.getNodeList()) { + for (ProcedureProtos.ProcedureStoreTracker.TrackerNode protoNode : trackerProtoBuf + .getNodeList()) { final BitSetNode node = new BitSetNode(protoNode); map.put(node.getStart(), node); } @@ -182,6 +181,7 @@ public void setMinMaxModifiedProcIds(long min, long max) { this.minModifiedProcId = min; this.maxModifiedProcId = max; } + /** * This method is used when restarting where we need to rebuild the ProcedureStoreTracker. The * {@link #delete(long)} method above assume that the {@link BitSetNode} exists, but when restart @@ -236,8 +236,8 @@ private void setDeleteIf(ProcedureStoreTracker tracker, * @see #setDeletedIfModifiedInBoth(ProcedureStoreTracker) */ public void setDeletedIfDeletedByThem(ProcedureStoreTracker tracker) { - setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) || - node.isDeleted(procId) == DeleteState.YES); + setDeleteIf(tracker, (node, procId) -> node == null || !node.contains(procId) + || node.isDeleted(procId) == DeleteState.YES); } /** @@ -288,16 +288,15 @@ public void reset() { public boolean isModified(long procId) { final Map.Entry entry = map.floorEntry(procId); - return entry != null && entry.getValue().contains(procId) && - entry.getValue().isModified(procId); + return entry != null && entry.getValue().contains(procId) + && entry.getValue().isModified(procId); } /** * If {@link #partial} is false, returns state from the bitmap. If no state is found for - * {@code procId}, returns YES. - * If partial is true, tracker doesn't have complete view of system state, so it returns MAYBE - * if there is no update for the procedure or if it doesn't have a state in bitmap. Otherwise, - * returns state from the bitmap. + * {@code procId}, returns YES. If partial is true, tracker doesn't have complete view of system + * state, so it returns MAYBE if there is no update for the procedure or if it doesn't have a + * state in bitmap. Otherwise, returns state from the bitmap. */ public DeleteState isDeleted(long procId) { Map.Entry entry = map.floorEntry(procId); @@ -374,12 +373,12 @@ public boolean isAllModified() { */ public long[] getAllActiveProcIds() { return map.values().stream().map(BitSetNode::getActiveProcIds).filter(p -> p.length > 0) - .flatMapToLong(LongStream::of).toArray(); + .flatMapToLong(LongStream::of).toArray(); } /** - * Clears the list of updated procedure ids. This doesn't affect global list of active - * procedure ids. + * Clears the list of updated procedure ids. This doesn't affect global list of active procedure + * ids. */ public void resetModified() { for (Map.Entry entry : map.entrySet()) { @@ -472,12 +471,11 @@ public void dump() { } // ======================================================================== - // Convert to/from Protocol Buffer. + // Convert to/from Protocol Buffer. 
// ======================================================================== /** - * Builds - * org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker + * Builds org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos.ProcedureStoreTracker * protocol buffer from current state. */ public ProcedureProtos.ProcedureStoreTracker toProto() throws IOException { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java index 947d5bd9d650..6734f50695a7 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFile.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -64,8 +64,8 @@ public ProcedureWALFile(final FileSystem fs, final FileStatus logStatus) { tracker.setPartialFlag(true); } - public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, - long startPos, long timestamp) { + public ProcedureWALFile(FileSystem fs, Path logFile, ProcedureWALHeader header, long startPos, + long timestamp) { this.fs = fs; this.header = header; this.logFile = logFile; @@ -205,7 +205,7 @@ public boolean equals(Object o) { return false; } - return compareTo((ProcedureWALFile)o) == 0; + return compareTo((ProcedureWALFile) o) == 0; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java index bc60584126fb..1dafb3cdac19 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormat.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2.store.wal; import java.io.IOException; @@ -73,7 +72,8 @@ interface Loader extends ProcedureLoader { void markCorruptedWAL(ProcedureWALFile log, IOException e); } - private ProcedureWALFormat() {} + private ProcedureWALFormat() { + } /** * Load all the procedures in these ProcedureWALFiles, and rebuild the given {@code tracker} if @@ -116,28 +116,17 @@ public static void writeHeader(OutputStream stream, ProcedureWALHeader header) } /* - * +-----------------+ - * | END OF WAL DATA | <---+ - * +-----------------+ | - * | | | - * | Tracker | | - * | | | - * +-----------------+ | - * | version | | - * +-----------------+ | - * | TRAILER_MAGIC | | - * +-----------------+ | - * | offset |-----+ - * +-----------------+ + * +-----------------+ | END OF WAL DATA | <---+ +-----------------+ | | | | | Tracker | | | | | + * +-----------------+ | | version | | +-----------------+ | | TRAILER_MAGIC | | + * +-----------------+ | | offset |-----+ +-----------------+ */ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker tracker) throws IOException { long offset = stream.getPos(); // Write EOF Entry - ProcedureWALEntry.newBuilder() - .setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF) - .build().writeDelimitedTo(stream); + ProcedureWALEntry.newBuilder().setType(ProcedureWALEntry.Type.PROCEDURE_WAL_EOF).build() + .writeDelimitedTo(stream); // Write Tracker tracker.toProto().writeDelimitedTo(stream); @@ -148,8 +137,7 @@ public static long writeTrailer(FSDataOutputStream stream, ProcedureStoreTracker return stream.getPos() - offset; } - public static ProcedureWALHeader readHeader(InputStream stream) - throws IOException { + public static ProcedureWALHeader readHeader(InputStream stream) throws IOException { ProcedureWALHeader header; try { header = ProcedureWALHeader.parseDelimitedFrom(stream); @@ -162,8 +150,8 @@ public static ProcedureWALHeader readHeader(InputStream stream) } if (header.getVersion() < 0 || header.getVersion() != HEADER_VERSION) { - throw new InvalidWALDataException("Invalid Header version. got " + header.getVersion() + - " expected " + HEADER_VERSION); + throw new InvalidWALDataException( + "Invalid Header version. got " + header.getVersion() + " expected " + HEADER_VERSION); } if (header.getType() < 0 || header.getType() > LOG_TYPE_MAX_VALID) { @@ -185,14 +173,14 @@ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long sta stream.seek(trailerPos); int version = stream.read(); if (version != TRAILER_VERSION) { - throw new InvalidWALDataException("Invalid Trailer version. got " + version + - " expected " + TRAILER_VERSION); + throw new InvalidWALDataException( + "Invalid Trailer version. got " + version + " expected " + TRAILER_VERSION); } long magic = StreamUtils.readLong(stream); if (magic != TRAILER_MAGIC) { - throw new InvalidWALDataException("Invalid Trailer magic. got " + magic + - " expected " + TRAILER_MAGIC); + throw new InvalidWALDataException( + "Invalid Trailer magic. 
got " + magic + " expected " + TRAILER_MAGIC); } long trailerOffset = StreamUtils.readLong(stream); @@ -203,10 +191,8 @@ public static ProcedureWALTrailer readTrailer(FSDataInputStream stream, long sta throw new InvalidWALDataException("Invalid Trailer begin"); } - ProcedureWALTrailer trailer = ProcedureWALTrailer.newBuilder() - .setVersion(version) - .setTrackerPos(stream.getPos()) - .build(); + ProcedureWALTrailer trailer = + ProcedureWALTrailer.newBuilder().setVersion(version).setTrackerPos(stream.getPos()).build(); return trailer; } @@ -214,8 +200,8 @@ public static ProcedureWALEntry readEntry(InputStream stream) throws IOException return ProcedureWALEntry.parseDelimitedFrom(stream); } - public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, - Procedure proc, Procedure[] subprocs) throws IOException { + public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, Procedure proc, + Procedure[] subprocs) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(type); builder.addProcedure(ProcedureUtil.convertToProtoProcedure(proc)); @@ -227,8 +213,7 @@ public static void writeEntry(ByteSlot slot, ProcedureWALEntry.Type type, builder.build().writeDelimitedTo(slot); } - public static void writeInsert(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeInsert(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INIT, proc, null); } @@ -237,13 +222,11 @@ public static void writeInsert(ByteSlot slot, Procedure proc, Procedure[] writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_INSERT, proc, subprocs); } - public static void writeUpdate(ByteSlot slot, Procedure proc) - throws IOException { + public static void writeUpdate(ByteSlot slot, Procedure proc) throws IOException { writeEntry(slot, ProcedureWALEntry.Type.PROCEDURE_WAL_UPDATE, proc, null); } - public static void writeDelete(ByteSlot slot, long procId) - throws IOException { + public static void writeDelete(ByteSlot slot, long procId) throws IOException { final ProcedureWALEntry.Builder builder = ProcedureWALEntry.newBuilder(); builder.setType(ProcedureWALEntry.Type.PROCEDURE_WAL_DELETE); builder.setProcId(procId); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java index 31150cad8fb2..f12abeb3ee47 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALFormatReader.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -56,11 +56,10 @@ class ProcedureWALFormatReader { private final ProcedureWALFormat.Loader loader; /** - * Global tracker that will be used by the WALProcedureStore after load. - * If the last WAL was closed cleanly we already have a full tracker ready to be used. - * If the last WAL was truncated (e.g. master killed) the tracker will be empty - * and the 'partial' flag will be set. In this case, on WAL replay we are going - * to rebuild the tracker. + * Global tracker that will be used by the WALProcedureStore after load. 
If the last WAL was + * closed cleanly we already have a full tracker ready to be used. If the last WAL was truncated + * (e.g. master killed) the tracker will be empty and the 'partial' flag will be set. In this + * case, on WAL replay we are going to rebuild the tracker. */ private final ProcedureStoreTracker tracker; diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java index 89e32c308807..06cf25c9c031 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPrettyPrinter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -63,13 +63,11 @@ public ProcedureWALPrettyPrinter() { /** * Reads a log file and outputs its contents. - * - * @param conf HBase configuration relevant to this log file - * @param p path of the log file to be read - * @throws IOException IOException + * @param conf HBase configuration relevant to this log file + * @param p path of the log file to be read + * @throws IOException IOException */ - public void processFile(final Configuration conf, final Path p) - throws IOException { + public void processFile(final Configuration conf, final Path p) throws IOException { FileSystem fs = p.getFileSystem(conf); if (!fs.exists(p)) { @@ -117,8 +115,7 @@ public void processProcedureWALFile(ProcedureWALFile log) throws IOException { } } catch (IOException e) { out.println("got an exception while reading the procedure WAL " + e.getMessage()); - } - finally { + } finally { log.close(); } } @@ -146,13 +143,10 @@ private void printHeader(ProcedureWALHeader header) { } /** - * Pass one or more log file names and formatting options and it will dump out - * a text version of the contents on stdout. - * - * @param args - * Command line arguments - * @throws IOException - * Thrown upon file system errors etc. + * Pass one or more log file names and formatting options and it will dump out a text version of + * the contents on stdout. + * @param args Command line arguments + * @throws IOException Thrown upon file system errors etc. 
*/ @Override public int run(final String[] args) throws IOException { @@ -172,19 +166,19 @@ public int run(final String[] args) throws IOException { if (files.isEmpty() || cmd.hasOption("h")) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } } catch (ParseException e) { e.printStackTrace(); HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("ProcedureWALPrettyPrinter ", options, true); - return(-1); + return (-1); } // get configuration, file system, and process the given files for (Path file : files) { processFile(getConf(), file); } - return(0); + return (0); } public static void main(String[] args) throws Exception { diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java index 5e1983f46968..1e3423e56a27 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureMap.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java index d2d661f956ac..d4cc121b8c96 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/store/wal/WALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -99,8 +99,7 @@ * will first be initialized to the oldest file's tracker(which is stored in the trailer), using the * method {@link ProcedureStoreTracker#resetTo(ProcedureStoreTracker, boolean)}, and then merge it * with the tracker of every newer wal files, using the - * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. - * If we find out + * {@link ProcedureStoreTracker#setDeletedIfModifiedInBoth(ProcedureStoreTracker)}. If we find out * that all the modified procedures for the oldest wal file are modified or deleted in newer wal * files, then we can delete it. 
This is because that, every time we call * {@link ProcedureStore#insert(Procedure[])} or {@link ProcedureStore#update(Procedure)}, we will @@ -119,33 +118,31 @@ public class WALProcedureStore extends ProcedureStoreBase { /** Used to construct the name of the log directory for master procedures */ public static final String MASTER_PROCEDURE_LOGDIR = "MasterProcWALs"; - public static final String WAL_COUNT_WARN_THRESHOLD_CONF_KEY = - "hbase.procedure.store.wal.warn.threshold"; + "hbase.procedure.store.wal.warn.threshold"; private static final int DEFAULT_WAL_COUNT_WARN_THRESHOLD = 10; public static final String EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = - "hbase.procedure.store.wal.exec.cleanup.on.load"; + "hbase.procedure.store.wal.exec.cleanup.on.load"; private static final boolean DEFAULT_EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY = true; public static final String MAX_RETRIES_BEFORE_ROLL_CONF_KEY = - "hbase.procedure.store.wal.max.retries.before.roll"; + "hbase.procedure.store.wal.max.retries.before.roll"; private static final int DEFAULT_MAX_RETRIES_BEFORE_ROLL = 3; public static final String WAIT_BEFORE_ROLL_CONF_KEY = - "hbase.procedure.store.wal.wait.before.roll"; + "hbase.procedure.store.wal.wait.before.roll"; private static final int DEFAULT_WAIT_BEFORE_ROLL = 500; - public static final String ROLL_RETRIES_CONF_KEY = - "hbase.procedure.store.wal.max.roll.retries"; + public static final String ROLL_RETRIES_CONF_KEY = "hbase.procedure.store.wal.max.roll.retries"; private static final int DEFAULT_ROLL_RETRIES = 3; public static final String MAX_SYNC_FAILURE_ROLL_CONF_KEY = - "hbase.procedure.store.wal.sync.failure.roll.max"; + "hbase.procedure.store.wal.sync.failure.roll.max"; private static final int DEFAULT_MAX_SYNC_FAILURE_ROLL = 3; public static final String PERIODIC_ROLL_CONF_KEY = - "hbase.procedure.store.wal.periodic.roll.msec"; + "hbase.procedure.store.wal.periodic.roll.msec"; private static final int DEFAULT_PERIODIC_ROLL = 60 * 60 * 1000; // 1h public static final String SYNC_WAIT_MSEC_CONF_KEY = "hbase.procedure.store.wal.sync.wait.msec"; @@ -236,8 +233,8 @@ public float getSyncedPerSec() { public WALProcedureStore(Configuration conf, LeaseRecovery leaseRecovery) throws IOException { this(conf, new Path(CommonFSUtils.getWALRootDir(conf), MASTER_PROCEDURE_LOGDIR), - new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_OLDLOGDIR_NAME), - leaseRecovery); + new Path(CommonFSUtils.getWALRootDir(conf), HConstants.HREGION_OLDLOGDIR_NAME), + leaseRecovery); } public WALProcedureStore(final Configuration conf, final Path walDir, final Path walArchiveDir, @@ -247,8 +244,8 @@ public WALProcedureStore(final Configuration conf, final Path walDir, final Path this.walDir = walDir; this.walArchiveDir = walArchiveDir; this.fs = CommonFSUtils.getWALFileSystem(conf); - this.enforceStreamCapability = conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, - true); + this.enforceStreamCapability = + conf.getBoolean(CommonFSUtils.UNSAFE_STREAM_CAPABILITY_ENFORCE, true); // Create the log directory for the procedure store if (!fs.exists(walDir)) { @@ -289,9 +286,9 @@ public void start(int numSlots) throws IOException { // Tunings walCountWarnThreshold = - conf.getInt(WAL_COUNT_WARN_THRESHOLD_CONF_KEY, DEFAULT_WAL_COUNT_WARN_THRESHOLD); + conf.getInt(WAL_COUNT_WARN_THRESHOLD_CONF_KEY, DEFAULT_WAL_COUNT_WARN_THRESHOLD); maxRetriesBeforeRoll = - conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, DEFAULT_MAX_RETRIES_BEFORE_ROLL); + conf.getInt(MAX_RETRIES_BEFORE_ROLL_CONF_KEY, 
DEFAULT_MAX_RETRIES_BEFORE_ROLL); maxSyncFailureRoll = conf.getInt(MAX_SYNC_FAILURE_ROLL_CONF_KEY, DEFAULT_MAX_SYNC_FAILURE_ROLL); waitBeforeRoll = conf.getInt(WAIT_BEFORE_ROLL_CONF_KEY, DEFAULT_WAIT_BEFORE_ROLL); rollRetries = conf.getInt(ROLL_RETRIES_CONF_KEY, DEFAULT_ROLL_RETRIES); @@ -301,8 +298,8 @@ public void start(int numSlots) throws IOException { useHsync = conf.getBoolean(USE_HSYNC_CONF_KEY, DEFAULT_USE_HSYNC); // WebUI - syncMetricsQueue = new CircularFifoQueue<>( - conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); + syncMetricsQueue = + new CircularFifoQueue<>(conf.getInt(STORE_WAL_SYNC_STATS_COUNT, DEFAULT_SYNC_STATS_COUNT)); // Init sync thread syncThread = new Thread("WALProcedureStoreSyncThread") { @@ -327,8 +324,8 @@ public void stop(final boolean abort) { return; } - LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + - (isSyncAborted() ? " (self aborting)" : "")); + LOG.info("Stopping the WAL Procedure Store, isAbort=" + abort + + (isSyncAborted() ? " (self aborting)" : "")); sendStopSignal(); if (!isSyncAborted()) { try { @@ -348,7 +345,7 @@ public void stop(final boolean abort) { // Close the old logs // they should be already closed, this is just in case the load fails // and we call start() and then stop() - for (ProcedureWALFile log: logs) { + for (ProcedureWALFile log : logs) { log.close(); } logs.clear(); @@ -403,8 +400,7 @@ public void recoverLease() throws IOException { while (isRunning()) { // Don't sleep before first attempt if (afterFirstAttempt) { - LOG.trace("Sleep {} ms after first lease recovery attempt.", - waitBeforeRoll); + LOG.trace("Sleep {} ms after first lease recovery attempt.", waitBeforeRoll); Threads.sleepWithoutInterrupt(waitBeforeRoll); } else { afterFirstAttempt = true; @@ -550,8 +546,9 @@ public void insert(Procedure proc, Procedure[] subprocs) { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. - LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + - proc + ", subprocs=" + Arrays.toString(subprocs), e); + LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: proc=" + proc + + ", subprocs=" + Arrays.toString(subprocs), + e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -579,8 +576,8 @@ public void insert(Procedure[] procs) { } catch (IOException e) { // We are not able to serialize the procedure. // this is a code error, and we are not able to go on. 
- LOG.error(HBaseMarkers.FATAL, "Unable to serialize one of the procedure: " + - Arrays.toString(procs), e); + LOG.error(HBaseMarkers.FATAL, + "Unable to serialize one of the procedure: " + Arrays.toString(procs), e); throw new RuntimeException(e); } finally { releaseSlot(slot); @@ -704,10 +701,12 @@ private void releaseSlot(final ByteSlot slot) { slotsCache.offer(slot); } - private enum PushType { INSERT, UPDATE, DELETE }; + private enum PushType { + INSERT, UPDATE, DELETE + }; - private long pushData(final PushType type, final ByteSlot slot, - final long procId, final long[] subProcIds) { + private long pushData(final PushType type, final ByteSlot slot, final long procId, + final long[] subProcIds) { if (!isRunning()) { throw new RuntimeException("the store must be running before inserting data"); } @@ -766,8 +765,7 @@ private long pushData(final PushType type, final ByteSlot slot, return logId; } - private void updateStoreTracker(final PushType type, - final long procId, final long[] subProcIds) { + private void updateStoreTracker(final PushType type, final long procId, final long[] subProcIds) { switch (type) { case INSERT: if (subProcIds == null) { @@ -817,8 +815,8 @@ private void syncLoop() throws Throwable { if (LOG.isTraceEnabled()) { float rollTsSec = getMillisFromLastRoll() / 1000.0f; LOG.trace(String.format("Waiting for data. flushed=%s (%s/sec)", - StringUtils.humanSize(totalSynced.get()), - StringUtils.humanSize(totalSynced.get() / rollTsSec))); + StringUtils.humanSize(totalSynced.get()), + StringUtils.humanSize(totalSynced.get() / rollTsSec))); } waitCond.await(getMillisToNextPeriodicRoll(), TimeUnit.MILLISECONDS); @@ -841,9 +839,8 @@ private void syncLoop() throws Throwable { final float syncedPerSec = totalSyncedToStore / rollSec; if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < syncMaxSlot)) { LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)", - StringUtils.humanTimeDiff(syncWaitMs), slotIndex, - StringUtils.humanSize(totalSyncedToStore), - StringUtils.humanSize(syncedPerSec))); + StringUtils.humanTimeDiff(syncWaitMs), slotIndex, + StringUtils.humanSize(totalSyncedToStore), StringUtils.humanSize(syncedPerSec))); } // update webui circular buffers (TODO: get rid of allocations) @@ -931,8 +928,8 @@ protected long syncSlots(final FSDataOutputStream stream, final ByteSlot[] slots sendPostSyncSignal(); if (LOG.isTraceEnabled()) { - LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + - ", flushed=" + StringUtils.humanSize(totalSynced)); + LOG.trace("Sync slots=" + count + '/' + syncMaxSlot + ", flushed=" + + StringUtils.humanSize(totalSynced)); } return totalSynced; } @@ -1005,7 +1002,7 @@ void removeInactiveLogsForTesting() throws Exception { lock.lock(); try { removeInactiveLogs(); - } finally { + } finally { lock.unlock(); } } @@ -1059,11 +1056,8 @@ boolean rollWriter(long logId) throws IOException { assert lock.isHeldByCurrentThread() : "expected to be the lock owner. 
" + lock.isLocked(); ProcedureWALHeader header = ProcedureWALHeader.newBuilder() - .setVersion(ProcedureWALFormat.HEADER_VERSION) - .setType(ProcedureWALFormat.LOG_TYPE_STREAM) - .setMinProcId(storeTracker.getActiveMinProcId()) - .setLogId(logId) - .build(); + .setVersion(ProcedureWALFormat.HEADER_VERSION).setType(ProcedureWALFormat.LOG_TYPE_STREAM) + .setMinProcId(storeTracker.getActiveMinProcId()).setLogId(logId).build(); FSDataOutputStream newStream = null; Path newLogFile = null; @@ -1083,11 +1077,11 @@ boolean rollWriter(long logId) throws IOException { // to provide. final String durability = useHsync ? StreamCapabilities.HSYNC : StreamCapabilities.HFLUSH; if (enforceStreamCapability && !newStream.hasCapability(durability)) { - throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + - " for proper operation during component failures, but the underlying filesystem does " + - "not support doing so. Please check the config value of '" + USE_HSYNC_CONF_KEY + - "' to set the desired level of robustness and ensure the config value of '" + - CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); + throw new IllegalStateException("The procedure WAL relies on the ability to " + durability + + " for proper operation during component failures, but the underlying filesystem does " + + "not support doing so. Please check the config value of '" + USE_HSYNC_CONF_KEY + + "' to set the desired level of robustness and ensure the config value of '" + + CommonFSUtils.HBASE_WAL_DIR + "' points to a FileSystem mount that can provide it."); } try { ProcedureWALFormat.writeHeader(newStream, header); @@ -1112,8 +1106,9 @@ boolean rollWriter(long logId) throws IOException { if (logs.size() == 2) { buildHoldingCleanupTracker(); } else if (logs.size() > walCountWarnThreshold) { - LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + - " to see if something is stuck.", logs.size(), walCountWarnThreshold); + LOG.warn("procedure WALs count={} above the warning threshold {}. check running procedures" + + " to see if something is stuck.", + logs.size(), walCountWarnThreshold); // This is just like what we have done at RS side when there are too many wal files. For RS, // if there are too many wal files, we will find out the wal entries in the oldest file, and // tell the upper layer to flush these regions so the wal entries will be useless and then we @@ -1160,7 +1155,7 @@ private void closeCurrentLogStream(boolean abort) { } // ========================================================================== - // Log Files cleaner helpers + // Log Files cleaner helpers // ========================================================================== private void removeInactiveLogs() throws IOException { // We keep track of which procedures are holding the oldest WAL in 'holdingCleanupTracker'. 
@@ -1246,7 +1241,7 @@ private boolean removeLogFile(final ProcedureWALFile log, final Path walArchiveD } // ========================================================================== - // FileSystem Log Files helpers + // FileSystem Log Files helpers // ========================================================================== public Path getWALDir() { return this.walDir; @@ -1280,13 +1275,13 @@ public boolean accept(Path path) { private static final Comparator FILE_STATUS_ID_COMPARATOR = new Comparator() { - @Override - public int compare(FileStatus a, FileStatus b) { - final long aId = getLogIdFromName(a.getPath().getName()); - final long bId = getLogIdFromName(b.getPath().getName()); - return Long.compare(aId, bId); - } - }; + @Override + public int compare(FileStatus a, FileStatus b) { + final long aId = getLogIdFromName(a.getPath().getName()); + final long bId = getLogIdFromName(b.getPath().getName()); + return Long.compare(aId, bId); + } + }; private FileStatus[] getLogFiles() throws IOException { try { @@ -1392,27 +1387,27 @@ private ProcedureWALFile initOldLog(final FileStatus logFile, final Path walArch } /** - * Parses a directory of WALs building up ProcedureState. - * For testing parse and profiling. + * Parses a directory of WALs building up ProcedureState. For testing parse and profiling. * @param args Include pointer to directory of WAL files for a store instance to parse & load. */ - public static void main(String [] args) throws IOException { + public static void main(String[] args) throws IOException { Configuration conf = HBaseConfiguration.create(); if (args == null || args.length != 1) { System.out.println("ERROR: Empty arguments list; pass path to MASTERPROCWALS_DIR."); System.out.println("Usage: WALProcedureStore MASTERPROCWALS_DIR"); System.exit(-1); } - WALProcedureStore store = new WALProcedureStore(conf, new Path(args[0]), null, - new LeaseRecovery() { - @Override - public void recoverFileLease(FileSystem fs, Path path) throws IOException { - // no-op - } - }); + WALProcedureStore store = + new WALProcedureStore(conf, new Path(args[0]), null, new LeaseRecovery() { + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + // no-op + } + }); try { store.start(16); - ProcedureExecutor pe = new ProcedureExecutor<>(conf, new Object()/*Pass anything*/, store); + ProcedureExecutor pe = + new ProcedureExecutor<>(conf, new Object()/* Pass anything */, store); pe.init(1, true); } finally { store.stop(true); diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java index 3e95de56f255..0a88b3fc2066 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/ByteSlot.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,20 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.procedure2.util; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; - import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; /** - * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. - * e.g. you write some data and you want to prepend an header that contains the data len or cksum. - * + * Similar to the ByteArrayOutputStream, with the exception that we can prepend an header. e.g. you + * write some data and you want to prepend an header that contains the data len or cksum. * ByteSlot slot = new ByteSlot(); * // write data * slot.write(...); @@ -78,13 +75,13 @@ public byte[] getBuffer() { public void writeAt(int offset, int b) { head = Math.min(head, offset); - buf[offset] = (byte)b; + buf[offset] = (byte) b; } @Override public void write(int b) { ensureCapacity(size + 1); - buf[size++] = (byte)b; + buf[size++] = (byte) b; } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java index fa796ae97426..32e5f5632bdb 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/DelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.concurrent.DelayQueue; import java.util.concurrent.Delayed; import java.util.concurrent.TimeUnit; - import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.yetus.audience.InterfaceAudience; import org.apache.yetus.audience.InterfaceStability; @@ -30,7 +29,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public final class DelayedUtil { - private DelayedUtil() { } + private DelayedUtil() { + } /** * Add a timeout to a Delay @@ -104,7 +104,7 @@ public static int compareDelayed(final Delayed o1, final Delayed o2) { private static long getTimeout(final Delayed o) { assert o instanceof DelayedWithTimeout : "expected DelayedWithTimeout instance, got " + o; - return ((DelayedWithTimeout)o).getTimeout(); + return ((DelayedWithTimeout) o).getTimeout(); } public static abstract class DelayedObject implements DelayedWithTimeout { @@ -146,7 +146,7 @@ public boolean equals(final Object other) { return false; } - return Objects.equals(getObject(), ((DelayedContainer)other).getObject()); + return Objects.equals(getObject(), ((DelayedContainer) other).getObject()); } @Override diff --git a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java index fddc999bec3c..cddfd94d3da9 100644 --- a/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java +++ b/hbase-procedure/src/main/java/org/apache/hadoop/hbase/procedure2/util/StringUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,8 @@ @InterfaceAudience.Private @InterfaceStability.Evolving public final class StringUtils { - private StringUtils() {} + private StringUtils() { + } public static String humanTimeDiff(long timeDiff) { if (timeDiff < 1000) { @@ -31,17 +32,17 @@ public static String humanTimeDiff(long timeDiff) { } StringBuilder buf = new StringBuilder(); - long hours = timeDiff / (60*60*1000); - long rem = (timeDiff % (60*60*1000)); - long minutes = rem / (60*1000); - rem = rem % (60*1000); + long hours = timeDiff / (60 * 60 * 1000); + long rem = (timeDiff % (60 * 60 * 1000)); + long minutes = rem / (60 * 1000); + rem = rem % (60 * 1000); float seconds = rem / 1000.0f; - if (hours != 0){ + if (hours != 0) { buf.append(hours); buf.append(" hrs, "); } - if (minutes != 0){ + if (minutes != 0) { buf.append(minutes); buf.append(" mins, "); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java index 6c66a49c2018..cb9b91f69673 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/ProcedureTestingUtility.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -53,12 +53,12 @@ private ProcedureTestingUtility() { } public static ProcedureStore createStore(final Configuration conf, final Path dir) - throws IOException { + throws IOException { return createWalStore(conf, dir); } public static WALProcedureStore createWalStore(final Configuration conf, final Path dir) - throws IOException { + throws IOException { return new WALProcedureStore(conf, dir, null, new LeaseRecovery() { @Override public void recoverFileLease(FileSystem fs, Path path) throws IOException { @@ -68,12 +68,12 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { } public static void restart(final ProcedureExecutor procExecutor, boolean abort, - boolean startWorkers) throws Exception { + boolean startWorkers) throws Exception { restart(procExecutor, false, true, null, null, null, abort, startWorkers); } public static void restart(final ProcedureExecutor procExecutor, boolean abort) - throws Exception { + throws Exception { restart(procExecutor, false, true, null, null, null, abort, true); } @@ -82,12 +82,12 @@ public static void restart(final ProcedureExecutor procExecutor) th } public static void initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads, - boolean abortOnCorruption) throws IOException { + boolean abortOnCorruption) throws IOException { initAndStartWorkers(procExecutor, numThreads, abortOnCorruption, true); } public static void initAndStartWorkers(ProcedureExecutor procExecutor, int numThreads, - boolean abortOnCorruption, boolean startWorkers) throws IOException { + boolean abortOnCorruption, boolean startWorkers) throws IOException { procExecutor.init(numThreads, abortOnCorruption); if (startWorkers) { procExecutor.startWorkers(); @@ -95,16 +95,16 @@ public static void initAndStartWorkers(ProcedureExecutor procExecutor, int nu } public static void restart(ProcedureExecutor procExecutor, - boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, - Callable 
actionBeforeStartWorker, Callable startAction) throws Exception { + boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, + Callable actionBeforeStartWorker, Callable startAction) throws Exception { restart(procExecutor, avoidTestKillDuringRestart, failOnCorrupted, stopAction, actionBeforeStartWorker, startAction, false, true); } public static void restart(ProcedureExecutor procExecutor, - boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, - Callable actionBeforeStartWorker, Callable startAction, boolean abort, - boolean startWorkers) throws Exception { + boolean avoidTestKillDuringRestart, boolean failOnCorrupted, Callable stopAction, + Callable actionBeforeStartWorker, Callable startAction, boolean abort, + boolean startWorkers) throws Exception { final ProcedureStore procStore = procExecutor.getStore(); final int storeThreads = procExecutor.getCorePoolSize(); final int execThreads = procExecutor.getCorePoolSize(); @@ -145,12 +145,12 @@ public static void restart(ProcedureExecutor procExecutor, } public static void storeRestart(ProcedureStore procStore, ProcedureStore.ProcedureLoader loader) - throws Exception { + throws Exception { storeRestart(procStore, false, loader); } public static void storeRestart(ProcedureStore procStore, boolean abort, - ProcedureStore.ProcedureLoader loader) throws Exception { + ProcedureStore.ProcedureLoader loader) throws Exception { procStore.stop(abort); procStore.start(procStore.getNumThreads()); procStore.recoverLease(); @@ -158,7 +158,7 @@ public static void storeRestart(ProcedureStore procStore, boolean abort, } public static LoadCounter storeRestartAndAssert(ProcedureStore procStore, long maxProcId, - long runnableCount, int completedCount, int corruptedCount) throws Exception { + long runnableCount, int completedCount, int corruptedCount) throws Exception { final LoadCounter loader = new LoadCounter(); storeRestart(procStore, loader); assertEquals(maxProcId, loader.getMaxProcId()); @@ -175,19 +175,19 @@ private static void createExecutorTesting(final ProcedureExecutor p } public static void setKillIfHasParent(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.killIfHasParent = value; } public static void setKillIfSuspended(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.killIfSuspended = value; } public static void setKillBeforeStoreUpdate(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.killBeforeStoreUpdate = value; LOG.warn("Set Kill before store update to: " + procExecutor.testing.killBeforeStoreUpdate); @@ -195,7 +195,7 @@ public static void setKillBeforeStoreUpdate(ProcedureExecutor procE } public static void setToggleKillBeforeStoreUpdate(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { createExecutorTesting(procExecutor); procExecutor.testing.toggleKillBeforeStoreUpdate = value; assertSingleExecutorForKillTests(procExecutor); @@ -216,27 +216,27 @@ public static void toggleKillAfterStoreUpdate(ProcedureExecutor pro } public static void setKillAndToggleBeforeStoreUpdate(ProcedureExecutor procExecutor, - boolean value) { + boolean value) { ProcedureTestingUtility.setKillBeforeStoreUpdate(procExecutor, value); ProcedureTestingUtility.setToggleKillBeforeStoreUpdate(procExecutor, value); assertSingleExecutorForKillTests(procExecutor); } 
private static void - assertSingleExecutorForKillTests(final ProcedureExecutor procExecutor) { + assertSingleExecutorForKillTests(final ProcedureExecutor procExecutor) { if (procExecutor.testing == null) { return; } - if (procExecutor.testing.killBeforeStoreUpdate || - procExecutor.testing.toggleKillBeforeStoreUpdate) { + if (procExecutor.testing.killBeforeStoreUpdate + || procExecutor.testing.toggleKillBeforeStoreUpdate) { assertEquals("expected only one executor running during test with kill/restart", 1, procExecutor.getCorePoolSize()); } } public static long submitAndWait(Configuration conf, TEnv env, Procedure proc) - throws IOException { + throws IOException { NoopProcedureStore procStore = new NoopProcedureStore(); ProcedureExecutor procExecutor = new ProcedureExecutor<>(conf, env, procStore); procStore.start(1); @@ -254,14 +254,14 @@ public static long submitAndWait(ProcedureExecutor procExecutor, Pr } public static long submitAndWait(ProcedureExecutor procExecutor, Procedure proc, - final long nonceGroup, final long nonce) { + final long nonceGroup, final long nonce) { long procId = submitProcedure(procExecutor, proc, nonceGroup, nonce); waitProcedure(procExecutor, procId); return procId; } public static long submitProcedure(ProcedureExecutor procExecutor, Procedure proc, - final long nonceGroup, final long nonce) { + final long nonceGroup, final long nonce) { final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce); long procId = procExecutor.registerNonce(nonceKey); assertFalse(procId >= 0); @@ -307,7 +307,7 @@ public static void waitNoProcedureRunning(ProcedureExecutor procExe } public static void assertProcNotYetCompleted(ProcedureExecutor procExecutor, - long procId) { + long procId) { assertFalse("expected a running proc", procExecutor.isFinished(procId)); assertEquals(null, procExecutor.getResult(procId)); } @@ -323,7 +323,7 @@ public static void assertProcNotFailed(final Procedure result) { } public static Throwable assertProcFailed(final ProcedureExecutor procExecutor, - final long procId) { + final long procId) { Procedure result = procExecutor.getResult(procId); assertTrue("expected procedure result", result != null); return assertProcFailed(result); @@ -372,17 +372,18 @@ public static Throwable getExceptionCause(final Procedure procInfo) { * This is a good test for finding state that needs persisting and steps that are not idempotent. 
*/ public static void testRecoveryAndDoubleExecution(final ProcedureExecutor procExec, - final long procId) throws Exception { + final long procId) throws Exception { testRecoveryAndDoubleExecution(procExec, procId, false); } public static void testRecoveryAndDoubleExecution(final ProcedureExecutor procExec, - final long procId, final boolean expectFailure) throws Exception { + final long procId, final boolean expectFailure) throws Exception { testRecoveryAndDoubleExecution(procExec, procId, expectFailure, null); } public static void testRecoveryAndDoubleExecution(final ProcedureExecutor procExec, - final long procId, final boolean expectFailure, final Runnable customRestart) throws Exception { + final long procId, final boolean expectFailure, final Runnable customRestart) + throws Exception { Procedure proc = procExec.getProcedure(procId); waitProcedure(procExec, procId); assertEquals(false, procExec.isRunning()); @@ -411,7 +412,7 @@ public NoopProcedure() { @Override protected Procedure[] execute(TEnv env) - throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { return null; } @@ -434,7 +435,7 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) throws } public static class NoopStateMachineProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { private TState initialState; private TEnv env; @@ -448,7 +449,7 @@ public NoopStateMachineProcedure(TEnv env, TState initialState) { @Override protected Flow executeFromState(TEnv env, TState tState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { return null; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java index 4f3c443faa98..4594025166e8 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestChildProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestChildProcedures { @ClassRule @@ -109,11 +109,9 @@ public void testChildLoadWithSteppedRestart() throws Exception { ProcedureTestingUtility.assertProcNotFailed(procExecutor, procId); } - /** - * Test the state setting that happens after store to WAL; in particular the bit where we - * set the parent runnable again after its children have all completed successfully. - * See HBASE-20978. + * Test the state setting that happens after store to WAL; in particular the bit where we set the + * parent runnable again after its children have all completed successfully. See HBASE-20978. 
*/ @Test public void testChildLoadWithRestartAfterChildSuccess() throws Exception { @@ -170,7 +168,8 @@ private void assertProcFailed(long procId) { } public static class TestRootProcedure extends SequentialProcedure { - public TestRootProcedure() {} + public TestRootProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { @@ -194,7 +193,8 @@ public boolean abort(TestProcEnv env) { } public static class TestChildProcedure extends SequentialProcedure { - public TestChildProcedure() {} + public TestChildProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java index 178b0cbc98af..b53a25184696 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestForceUpdateProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -48,7 +48,7 @@ public class TestForceUpdateProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestForceUpdateProcedure.class); + HBaseClassTestRule.forClass(TestForceUpdateProcedure.class); private static HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java index 9f24403dc7d4..2def0769ae90 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestLockAndQueue.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestLockAndQueue { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestLockAndQueue.class); + HBaseClassTestRule.forClass(TestLockAndQueue.class); @Test public void testHasLockAccess() { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java index 976b718d79bb..8ccccd34c86b 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureBypass.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.stream.Collectors; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseClassTestRule; @@ -40,12 +39,12 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos; - -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureBypass { - @ClassRule public static final HBaseClassTestRule CLASS_RULE = HBaseClassTestRule - .forClass(TestProcedureBypass.class); + @ClassRule + public static final HBaseClassTestRule CLASS_RULE = + HBaseClassTestRule.forClass(TestProcedureBypass.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureBypass.class); @@ -77,11 +76,9 @@ public static void setUp() throws Exception { logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); - procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, - procStore); + procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), procEnv, procStore); procStore.start(PROCEDURE_EXECUTOR_SLOTS); - ProcedureTestingUtility - .initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); + ProcedureTestingUtility.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); } @Test @@ -89,7 +86,7 @@ public void testBypassSuspendProcedure() throws Exception { final SuspendProcedure proc = new SuspendProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 30000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -100,9 +97,9 @@ public void testStuckProcedure() throws Exception { final StuckProcedure proc = new StuckProcedure(); long id = procExecutor.submitProcedure(proc); Thread.sleep(500); - //bypass the procedure + // bypass the procedure assertTrue(procExecutor.bypassProcedure(id, 1000, true, false)); - //Since the procedure is stuck there, we need to restart the executor to recovery. + // Since the procedure is stuck there, we need to restart the executor to recovery. 
ProcedureTestingUtility.restart(procExecutor); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); LOG.info("{} finished", proc); @@ -113,9 +110,8 @@ public void testBypassingProcedureWithParent() throws Exception { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(suspendProcedure.getProcId(), 1000, false, false)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); @@ -141,9 +137,8 @@ public void testBypassingProcedureWithParentRecursive() throws Exception { final RootProcedure proc = new RootProcedure(); long rootId = procExecutor.submitProcedure(proc); htu.waitFor(5000, () -> procExecutor.getProcedures().stream() - .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()) - .size() > 0); - SuspendProcedure suspendProcedure = (SuspendProcedure)procExecutor.getProcedures().stream() + .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).size() > 0); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().stream() .filter(p -> p.getParentProcId() == rootId).collect(Collectors.toList()).get(0); assertTrue(procExecutor.bypassProcedure(rootId, 1000, false, true)); htu.waitFor(5000, () -> proc.isSuccess() && proc.isBypass()); @@ -176,8 +171,7 @@ public SuspendProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure throw new ProcedureSuspendedException(); } @@ -201,7 +195,6 @@ protected Procedure[] execute(final TestProcEnv env) { } - public static class RootProcedure extends ProcedureTestingUtility.NoopProcedure { private boolean childSpwaned = false; @@ -210,11 +203,10 @@ public RootProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { if (!childSpwaned) { childSpwaned = true; - return new Procedure[] {new SuspendProcedure()}; + return new Procedure[] { new SuspendProcedure() }; } else { return null; } @@ -228,8 +220,7 @@ public WaitingTimeoutProcedure() { } @Override - protected Procedure[] execute(final TestProcEnv env) - throws ProcedureSuspendedException { + protected Procedure[] execute(final TestProcEnv env) throws ProcedureSuspendedException { // Always suspend the procedure setTimeout(50000); setState(ProcedureProtos.ProcedureState.WAITING_TIMEOUT); @@ -263,7 +254,7 @@ public StuckStateMachineProcedure(TestProcEnv env, StuckStateMachineState initia @Override protected Flow executeFromState(TestProcEnv env, StuckStateMachineState tState) - throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { + throws ProcedureSuspendedException, ProcedureYieldException, InterruptedException { switch (tState) { case START: LOG.info("PHASE 1: START"); @@ -292,5 
+283,4 @@ protected int getStateId(StuckStateMachineState tState) { } } - } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java index 82917ea5315c..baf9e58cfb2f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureCleanup.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -49,8 +49,7 @@ public class TestProcedureCleanup { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureCleanup.class); - + HBaseClassTestRule.forClass(TestProcedureCleanup.class); private static final Logger LOG = LoggerFactory.getLogger(TestProcedureCleanup.class); @@ -95,8 +94,7 @@ public void testProcedureShouldNotCleanOnLoad() throws Exception { LOG.info("Begin to execute " + rootProc); // wait until the child procedure arrival htu.waitFor(10000, () -> procExecutor.getProcedures().size() >= 2); - SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor - .getProcedures().get(1); + SuspendProcedure suspendProcedure = (SuspendProcedure) procExecutor.getProcedures().get(1); // wait until the suspendProcedure executed suspendProcedure.latch.countDown(); Thread.sleep(100); @@ -181,14 +179,13 @@ private void corrupt(FileStatus file) throws IOException { Path tmpFile = file.getPath().suffix(".tmp"); // remove the last byte to make the trailer corrupted try (FSDataInputStream in = fs.open(file.getPath()); - FSDataOutputStream out = fs.create(tmpFile)) { + FSDataOutputStream out = fs.create(tmpFile)) { ByteStreams.copy(ByteStreams.limit(in, file.getLen() - 1), out); } fs.delete(file.getPath(), false); fs.rename(tmpFile, file.getPath()); } - public static final class ExchangeProcedure extends ProcedureTestingUtility.NoopProcedure { private final Exchanger exchanger = new Exchanger<>(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java index f13a46c2f7c0..581f4825539e 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureEvents.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureEvents { @ClassRule @@ -81,11 +81,10 @@ public void tearDown() throws IOException { } /** - * Tests being able to suspend a Procedure for N timeouts and then failing.s - * Resets the timeout after each elapses. See {@link TestTimeoutEventProcedure} for example - * of how to do this sort of trickery with the ProcedureExecutor; i.e. suspend for a while, - * check for a condition and if not set, suspend again, etc., ultimately failing or succeeding - * eventually. 
+ * Tests being able to suspend a Procedure for N timeouts and then failing.s Resets the timeout + * after each elapses. See {@link TestTimeoutEventProcedure} for example of how to do this sort of + * trickery with the ProcedureExecutor; i.e. suspend for a while, check for a condition and if not + * set, suspend again, etc., ultimately failing or succeeding eventually. */ @Test public void testTimeoutEventProcedure() throws Exception { @@ -122,20 +121,19 @@ private void testTimeoutEventProcedureDoubleExecution(final boolean killIfSuspen /** * This Event+Procedure exhibits following behavior: *

- * <ul>
- *   <li>
- *     On procedure execute()
- *       <ul>
- *         <li>If had enough timeouts, abort the procedure. Else....</li>
- *         <li>Suspend the event and add self to its suspend queue</li>
- *         <li>Go into waiting state</li>
- *       </ul>
- *   </li>
- *   <li>
- *     On waiting timeout
- *       <ul>
- *         <li>Wake the event (which adds this procedure back into scheduler queue), and set own's
- *         state to RUNNABLE (so can be executed again).</li>
- *       </ul>
- *   </li>
- * </ul>
+ * <ul>
+ * <li>On procedure execute()
+ * <ul>
+ * <li>If had enough timeouts, abort the procedure. Else....</li>
+ * <li>Suspend the event and add self to its suspend queue</li>
+ * <li>Go into waiting state</li>
+ * </ul>
+ * </li>
+ * <li>On waiting timeout
+ * <ul>
+ * <li>Wake the event (which adds this procedure back into scheduler queue), and set own's state
+ * to RUNNABLE (so can be executed again).</li>
+ * </ul>
+ * </li>
+ * </ul>
    */ public static class TestTimeoutEventProcedure extends NoopProcedure { @@ -144,7 +142,8 @@ public static class TestTimeoutEventProcedure extends NoopProcedure private final AtomicInteger ntimeouts = new AtomicInteger(0); private int maxTimeouts = 1; - public TestTimeoutEventProcedure() {} + public TestTimeoutEventProcedure() { + } public TestTimeoutEventProcedure(final int timeoutMsec, final int maxTimeouts) { this.maxTimeouts = maxTimeouts; @@ -190,8 +189,7 @@ protected void afterReplay(final TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value.Builder ntimeoutsBuilder = Int32Value.newBuilder().setValue(ntimeouts.get()); serializer.serialize(ntimeoutsBuilder.build()); @@ -200,8 +198,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int32Value ntimeoutsValue = serializer.deserialize(Int32Value.class); ntimeouts.set(ntimeoutsValue.getValue()); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java index bbe37780d2c8..dc75394b960a 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecution.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -42,7 +42,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecution { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -178,7 +178,7 @@ public void testSingleSequentialProc() { public void testSingleSequentialProcRollback() { List state = new ArrayList<>(); Procedure subProc2 = - new TestSequentialProcedure("subProc2", state, new TestProcedureException("fail test")); + new TestSequentialProcedure("subProc2", state, new TestProcedureException("fail test")); Procedure subProc1 = new TestSequentialProcedure("subProc1", state, subProc2); Procedure rootProc = new TestSequentialProcedure("rootProc", state, subProc1); long rootId = ProcedureTestingUtility.submitAndWait(procExecutor, rootProc); @@ -204,7 +204,8 @@ public void testSingleSequentialProcRollback() { public static class TestFaultyRollback extends SequentialProcedure { private int retries = 0; - public TestFaultyRollback() { } + public TestFaultyRollback() { + } @Override protected Procedure[] execute(Void env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java index 7f130caf4a7c..77a3d8c1cac0 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureExecutor.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureExecutor { @ClassRule @@ -155,8 +155,8 @@ private int waitThreadCount(final int expectedThreads) { if (procExecutor.getWorkerThreadCount() == expectedThreads) { break; } - LOG.debug("waiting for thread count=" + expectedThreads + - " current=" + procExecutor.getWorkerThreadCount()); + LOG.debug("waiting for thread count=" + expectedThreads + " current=" + + procExecutor.getWorkerThreadCount()); Threads.sleepWithoutInterrupt(250); } return procExecutor.getWorkerThreadCount(); @@ -189,5 +189,6 @@ protected Procedure[] execute(final TestProcEnv env) { } } - private static class TestProcEnv { } + private static class TestProcEnv { + } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java index 75c8d16485b6..8a05283bc712 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureInMemoryChore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -36,7 +36,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureInMemoryChore { @ClassRule diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java index 4d9df1a53890..8bb90f98f516 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureMetrics.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -37,7 +37,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureMetrics { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -204,7 +204,7 @@ public ProcedureMetrics(boolean success, ProcedureMetrics[] subprocs) { } public ProcedureMetrics(boolean success, boolean yield, int yieldCount, - ProcedureMetrics[] subprocs) { + ProcedureMetrics[] subprocs) { this.success = success; this.yield = yield; this.yieldCount = yieldCount; @@ -218,8 +218,8 @@ protected void updateMetricsOnSubmit(TestProcEnv env) { } @Override - protected Procedure[] execute(TestProcEnv env) throws ProcedureYieldException, - ProcedureSuspendedException, InterruptedException { + protected Procedure[] execute(TestProcEnv env) + throws ProcedureYieldException, ProcedureSuspendedException, InterruptedException { if (this.yield) { if (yieldNum < yieldCount) { yieldNum++; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java index e4d039085da5..84c3db4e6cb1 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureNonce.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -43,7 +43,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureNonce { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -174,8 +174,8 @@ public void testConcurrentNonceRegistrationWithRollback() throws IOException { testConcurrentNonceRegistration(false, 890, 55555); } - private void testConcurrentNonceRegistration(final boolean submitProcedure, - final long nonceGroup, final long nonce) throws IOException { + private void testConcurrentNonceRegistration(final boolean submitProcedure, final long nonceGroup, + final long nonce) throws IOException { // register the nonce final NonceKey nonceKey = procExecutor.createNonceKey(nonceGroup, nonce); @@ -229,8 +229,7 @@ public void run() { // register the nonce t2BeforeNonceRegisteredLatch.countDown(); - assertFalse("unexpected non registered nonce", - procExecutor.registerNonce(nonceKey) < 0); + assertFalse("unexpected non registered nonce", procExecutor.registerNonce(nonceKey) < 0); } catch (Throwable e) { t2Exception.set(e); } finally { @@ -256,7 +255,8 @@ public void run() { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -269,7 +269,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java index acfd4f4a6969..59e8b311c043 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRecovery.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int32Value; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureRecovery { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -98,7 +98,8 @@ private void restart() throws Exception { public static class TestSingleStepProcedure extends SequentialProcedure { private int step = 0; - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -110,7 +111,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } @Override - protected void rollback(TestProcEnv env) { } + protected void rollback(TestProcEnv env) { + } @Override protected boolean abort(TestProcEnv env) { @@ -130,8 +132,7 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { step++; Threads.sleepWithoutInterrupt(procSleepInterval); if (isAborted()) { - setFailure(new RemoteProcedureException(getClass().getName(), - new ProcedureAbortedException( + setFailure(new RemoteProcedureException(getClass().getName(), new ProcedureAbortedException( "got an abort at " + getClass().getName() + " step=" + step))); return null; } @@ -155,7 +156,7 @@ private boolean isAborted() { boolean aborted = abort.get(); BaseTestStepProcedure proc = this; while (proc.hasParent() && !aborted) { - proc = (BaseTestStepProcedure)procExecutor.getProcedure(proc.getParentProcId()); + proc = (BaseTestStepProcedure) procExecutor.getProcedure(proc.getParentProcId()); aborted = proc.isAborted(); } return aborted; @@ -163,7 +164,8 @@ private boolean isAborted() { } public static class TestMultiStepProcedure extends BaseTestStepProcedure { - public TestMultiStepProcedure() { } + public TestMultiStepProcedure() { + } @Override public Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -172,7 +174,8 @@ public Procedure[] execute(TestProcEnv env) throws InterruptedException { } public static class Step1Procedure extends BaseTestStepProcedure { - public Step1Procedure() { } + public Step1Procedure() { + } @Override protected Procedure[] execute(TestProcEnv env) throws InterruptedException { @@ -182,7 +185,8 @@ protected Procedure[] execute(TestProcEnv env) throws InterruptedException { } public static class Step2Procedure extends BaseTestStepProcedure { - public Step2Procedure() { } + public Step2Procedure() { + } } } @@ -295,9 +299,12 @@ public void testMultiStepRollbackRecovery() throws Exception { public static class TestStateMachineProcedure extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3, DONE } + enum State { + STATE_1, STATE_2, STATE_3, DONE + } - public TestStateMachineProcedure() {} + public TestStateMachineProcedure() { + } public TestStateMachineProcedure(final boolean testSubmitChildProc) { this.submitChildProc = testSubmitChildProc; @@ -388,16 +395,14 @@ protected boolean abort(TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { super.serializeStateData(serializer); Int32Value.Builder builder = Int32Value.newBuilder().setValue(iResult); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer 
serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { super.deserializeStateData(serializer); Int32Value value = serializer.deserialize(Int32Value.class); iResult = value.getValue(); @@ -515,7 +520,7 @@ private void dumpLogDirState() { try { FileStatus[] files = fs.listStatus(logDir); if (files != null && files.length > 0) { - for (FileStatus file: files) { + for (FileStatus file : files) { assertTrue(file.toString(), file.isFile()); LOG.debug("log file " + file.getPath() + " size=" + file.getLen()); } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java index db54f22f535b..7ef21f890f25 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureReplayOrder.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; + import java.io.IOException; import java.util.ArrayList; import java.util.concurrent.atomic.AtomicLong; @@ -141,8 +142,8 @@ private void submitProcedures(final int nthreads, final int nprocPerThread, public void run() { for (int i = 0; i < nprocPerThread; ++i) { try { - procExecutor.submitProcedure((Procedure) - procClazz.getDeclaredConstructor().newInstance()); + procExecutor + .submitProcedure((Procedure) procClazz.getDeclaredConstructor().newInstance()); } catch (Exception e) { LOG.error("unable to instantiate the procedure", e); fail("failure during the proc.newInstance(): " + e.getMessage()); @@ -197,7 +198,8 @@ public long getExecId() { } @Override - protected void rollback(TestProcedureEnv env) { } + protected void rollback(TestProcedureEnv env) { + } @Override protected boolean abort(TestProcedureEnv env) { @@ -205,15 +207,13 @@ protected boolean abort(TestProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value.Builder builder = Int64Value.newBuilder().setValue(execId); serializer.serialize(builder.build()); } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { Int64Value value = serializer.deserialize(Int64Value.class); execId = value.getValue(); step = 2; @@ -221,7 +221,8 @@ protected void deserializeStateData(ProcedureStateSerializer serializer) } public static class TestSingleStepProcedure extends TestProcedure { - public TestSingleStepProcedure() { } + public TestSingleStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { @@ -244,7 +245,8 @@ public String toString() { } public static class TestTwoStepProcedure extends TestProcedure { - public TestTwoStepProcedure() { } + public TestTwoStepProcedure() { + } @Override protected Procedure[] execute(TestProcedureEnv env) throws ProcedureYieldException { diff --git 
a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java index fc12924244e1..4e58c5e3bf37 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureRollbackAIOOB.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java index f56cdb31b6b8..52e768ee96fe 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSchedulerConcurrency.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSchedulerConcurrency { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -105,10 +105,10 @@ public void run() { } if (wakeCount.get() != oldWakeCount) { lastUpdate = EnvironmentEdgeManager.currentTime(); - } else if (wakeCount.get() >= NRUNS && - (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) { - break; - } + } else if (wakeCount.get() >= NRUNS + && (EnvironmentEdgeManager.currentTime() - lastUpdate) > WAIT_THRESHOLD) { + break; + } Threads.sleepWithoutInterrupt(25); } } @@ -119,7 +119,7 @@ public void run() { @Override public void run() { while (true) { - TestProcedureWithEvent proc = (TestProcedureWithEvent)sched.poll(); + TestProcedureWithEvent proc = (TestProcedureWithEvent) sched.poll(); if (proc == null) { continue; } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java index 4b8879285144..574613765365 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSkipPersistence.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -41,7 +41,7 @@ public class TestProcedureSkipPersistence { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureSkipPersistence.class); + HBaseClassTestRule.forClass(TestProcedureSkipPersistence.class); private ProcedureExecutor procExecutor; private ProcedureStore procStore; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java index d17c00c620fb..6695dd8969ce 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureSuspended.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureSuspended { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -181,8 +181,8 @@ public static class TestLockProcedure extends Procedure { private AtomicBoolean lock = null; private boolean hasLock = false; - public TestLockProcedure(final AtomicBoolean lock, final String key, - final boolean throwYield, final boolean throwSuspend) { + public TestLockProcedure(final AtomicBoolean lock, final String key, final boolean throwYield, + final boolean throwSuspend) { this.lock = lock; this.key = key; this.throwYield = throwYield; @@ -259,13 +259,11 @@ protected boolean abort(TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java index 807614529fc2..922b79e30326 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureToString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -30,7 +30,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProcedureProtos.ServerCrashState; import org.apache.hadoop.hbase.shaded.protobuf.generated.ProcedureProtos.ProcedureState; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestProcedureToString { @ClassRule @@ -40,7 +40,8 @@ public class TestProcedureToString { /** * A do-nothing environment for BasicProcedure. */ - static class BasicProcedureEnv {}; + static class BasicProcedureEnv { + }; /** * A do-nothing basic procedure just for testing toString. 
@@ -49,7 +50,7 @@ static class BasicProcedure extends Procedure { @Override protected Procedure[] execute(BasicProcedureEnv env) throws ProcedureYieldException, InterruptedException { - return new Procedure [] {this}; + return new Procedure[] { this }; } @Override @@ -62,13 +63,11 @@ protected boolean abort(BasicProcedureEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -106,17 +105,17 @@ public void testBasicToString() { * Do-nothing SimpleMachineProcedure for checking its toString. */ static class SimpleStateMachineProcedure - extends StateMachineProcedure { + extends StateMachineProcedure { @Override - protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow executeFromState( - BasicProcedureEnv env, ServerCrashState state) + protected org.apache.hadoop.hbase.procedure2.StateMachineProcedure.Flow + executeFromState(BasicProcedureEnv env, ServerCrashState state) throws ProcedureYieldException, InterruptedException { return null; } @Override - protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) throws IOException, - InterruptedException { + protected void rollbackState(BasicProcedureEnv env, ServerCrashState state) + throws IOException, InterruptedException { } @Override diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java index 4d57c37ac619..cea18cb47030 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestProcedureUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -34,7 +34,7 @@ public class TestProcedureUtil { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureUtil.class); + HBaseClassTestRule.forClass(TestProcedureUtil.class); @Test public void testValidation() throws Exception { @@ -51,13 +51,14 @@ public void testConvert() throws Exception { // check Procedure to protobuf conversion final TestProcedure proc1 = new TestProcedure(10, 1, new byte[] { 65 }); final ProcedureProtos.Procedure proto1 = ProcedureUtil.convertToProtoProcedure(proc1); - final TestProcedure proc2 = (TestProcedure)ProcedureUtil.convertToProcedure(proto1); + final TestProcedure proc2 = (TestProcedure) ProcedureUtil.convertToProcedure(proto1); final ProcedureProtos.Procedure proto2 = ProcedureUtil.convertToProtoProcedure(proc2); assertEquals(false, proto2.hasResult()); assertEquals("Procedure protobuf does not match", proto1, proto2); } public static class TestProcedureNoDefaultConstructor extends TestProcedure { - public TestProcedureNoDefaultConstructor(int x) {} + public TestProcedureNoDefaultConstructor(int x) { + } } } diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java index 7f44fc31322d..6cbad6df8d4e 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestRemoteProcedureDispatcherUncaughtExceptionHandler.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestRemoteProcedureDispatcherUncaughtExceptionHandler { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestRemoteProcedureDispatcherUncaughtExceptionHandler.class); + HBaseClassTestRule.forClass(TestRemoteProcedureDispatcherUncaughtExceptionHandler.class); private static final class ExceptionHandler implements UncaughtExceptionHandler { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java index b076fb906ac8..4d85de33cee5 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestStateMachineProcedure.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -38,7 +38,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStateMachineProcedure { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -62,7 +62,7 @@ public boolean equals(final Object other) { // we are going to serialize the exception in the test, // so the instance comparison will not match - return getMessage().equals(((Exception)other).getMessage()); + return getMessage().equals(((Exception) other).getMessage()); } @Override @@ -179,7 +179,10 @@ public void testChildOnLastStepWithRollbackDoubleExecution() throws Exception { assertEquals(TEST_FAILURE_EXCEPTION, cause); } - public enum TestSMProcedureState { STEP_1, STEP_2 }; + public enum TestSMProcedureState { + STEP_1, STEP_2 + }; + public static class TestSMProcedure extends StateMachineProcedure { @Override @@ -227,7 +230,7 @@ protected TestSMProcedureState getInitialState() { } public static class TestSMProcedureBadRollback - extends StateMachineProcedure { + extends StateMachineProcedure { @Override protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { LOG.info("EXEC " + state + " " + this); @@ -244,6 +247,7 @@ protected Flow executeFromState(TestProcEnv env, TestSMProcedureState state) { } return Flow.HAS_MORE_STATE; } + @Override protected void rollbackState(TestProcEnv env, TestSMProcedureState state) { LOG.info("ROLLBACK " + state + " " + this); @@ -266,8 +270,7 @@ protected TestSMProcedureState getInitialState() { } @Override - protected void rollback(final TestProcEnv env) - throws IOException, InterruptedException { + protected void rollback(final TestProcEnv env) throws IOException, InterruptedException { if (isEofState()) { stateCount--; } @@ -275,8 +278,8 @@ protected void rollback(final TestProcEnv env) updateTimestamp(); rollbackState(env, getCurrentState()); throw new IOException(); - } catch(IOException e) { - //do nothing for now + } catch (IOException e) { + // do nothing for now } finally { stateCount--; updateTimestamp(); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java index e359e5cedfe6..9f104fc41001 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/TestYieldProcedures.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -40,7 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestYieldProcedures { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -70,8 +70,8 @@ public void setUp() throws IOException { logDir = new Path(testDir, "proc-logs"); procStore = ProcedureTestingUtility.createWalStore(htu.getConfiguration(), logDir); procRunnables = new TestScheduler(); - procExecutor = - new ProcedureExecutor<>(htu.getConfiguration(), new TestProcEnv(), procStore, procRunnables); + procExecutor = new ProcedureExecutor<>(htu.getConfiguration(), new TestProcEnv(), procStore, + procRunnables); procStore.start(PROCEDURE_EXECUTOR_SLOTS); ProcedureTestingUtility.initAndStartWorkers(procExecutor, PROCEDURE_EXECUTOR_SLOTS, true); } @@ -189,7 +189,9 @@ public long nextTimestamp() { public static class TestStateMachineProcedure extends StateMachineProcedure { - enum State { STATE_1, STATE_2, STATE_3 } + enum State { + STATE_1, STATE_2, STATE_3 + } public static class ExecutionInfo { private final boolean rollback; @@ -266,8 +268,7 @@ protected StateMachineProcedure.Flow executeFromState(TestProcEnv env, State sta } @Override - protected void rollbackState(TestProcEnv env, final State state) - throws InterruptedException { + protected void rollbackState(TestProcEnv env, final State state) throws InterruptedException { final long ts = env.nextTimestamp(); LOG.debug(getProcId() + " rollback state " + state + " ts=" + ts); executionInfo.add(new ExecutionInfo(ts, state, true)); @@ -347,13 +348,11 @@ protected boolean isYieldAfterExecutionStep(final TestProcEnv env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { } } @@ -364,7 +363,8 @@ private static class TestScheduler extends SimpleProcedureScheduler { private int yieldCalls; private int pollCalls; - public TestScheduler() {} + public TestScheduler() { + } @Override public void addFront(final Procedure proc) { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java index d88d93e571f2..9e1b4f3c722f 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/ProcedureStorePerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -32,6 +32,7 @@ import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -39,35 +40,35 @@ * Base class for testing procedure store performance. 
*/ public abstract class ProcedureStorePerformanceEvaluation - extends AbstractHBaseTool { + extends AbstractHBaseTool { // Command line options and defaults. public static String DEFAULT_OUTPUT_PATH = "proc-store"; public static Option OUTPUT_PATH_OPTION = - new Option("output", true, "The output path. Default: " + DEFAULT_OUTPUT_PATH); + new Option("output", true, "The output path. Default: " + DEFAULT_OUTPUT_PATH); public static int DEFAULT_NUM_THREADS = 20; public static Option NUM_THREADS_OPTION = new Option("threads", true, - "Number of parallel threads which will write insert/updates/deletes to store. Default: " + - DEFAULT_NUM_THREADS); + "Number of parallel threads which will write insert/updates/deletes to store. Default: " + + DEFAULT_NUM_THREADS); public static int DEFAULT_NUM_PROCS = 1000000; // 1M public static Option NUM_PROCS_OPTION = new Option("procs", true, - "Total number of procedures. Each procedure writes one insert and one update. Default: " + - DEFAULT_NUM_PROCS); + "Total number of procedures. Each procedure writes one insert and one update. Default: " + + DEFAULT_NUM_PROCS); public static int DEFAULT_STATE_SIZE = 1024; // 1KB public static Option STATE_SIZE_OPTION = new Option("state_size", true, - "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE + - "bytes"); + "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE + + "bytes"); public static Option SYNC_OPTION = new Option("sync", true, - "Type of sync to use when writing WAL contents to file system. Accepted values: hflush, " + - "hsync, nosync. Default: hflush"); + "Type of sync to use when writing WAL contents to file system. Accepted values: hflush, " + + "hsync, nosync. Default: hflush"); public static String DEFAULT_SYNC_OPTION = "hflush"; @@ -102,8 +103,8 @@ protected void processOptions(CommandLine cmd) { numThreads = getOptionAsInt(cmd, NUM_THREADS_OPTION.getOpt(), DEFAULT_NUM_THREADS); numProcs = getOptionAsInt(cmd, NUM_PROCS_OPTION.getOpt(), DEFAULT_NUM_PROCS); syncType = cmd.getOptionValue(SYNC_OPTION.getOpt(), DEFAULT_SYNC_OPTION); - assert "hsync".equals(syncType) || "hflush".equals(syncType) || "nosync".equals( - syncType) : "sync argument can only accept one of these three values: hsync, hflush, nosync"; + assert "hsync".equals(syncType) || "hflush".equals(syncType) || "nosync".equals(syncType) + : "sync argument can only accept one of these three values: hsync, hflush, nosync"; stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); SERIALIZED_STATE = new byte[stateSize]; Bytes.random(SERIALIZED_STATE); @@ -137,8 +138,8 @@ private void tearDownProcedureStore() { storeDir = fs.makeQualified(new Path(outputPath)); fs.delete(storeDir, true); } catch (IOException e) { - System.err.println("Error: Couldn't delete log dir. You can delete it manually to free up " + - "disk space. Location: " + storeDir); + System.err.println("Error: Couldn't delete log dir. You can delete it manually to free up " + + "disk space. 
Location: " + storeDir); e.printStackTrace(); } } @@ -159,8 +160,8 @@ protected int doWork() throws Exception { boolean failure = false; try { for (Future future : futures) { - long timeout = start + WORKER_THREADS_TIMEOUT_SEC * 1000 - - EnvironmentEdgeManager.currentTime(); + long timeout = + start + WORKER_THREADS_TIMEOUT_SEC * 1000 - EnvironmentEdgeManager.currentTime(); failure |= (future.get(timeout, TimeUnit.MILLISECONDS).equals(EXIT_FAILURE)); } } catch (Exception e) { @@ -219,8 +220,8 @@ public Integer call() throws IOException { } if (procId != 0 && procId % 10000 == 0) { long ns = System.nanoTime() - start; - System.out.println("Wrote " + procId + " procedures in " + - StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(ns))); + System.out.println("Wrote " + procId + " procedures in " + + StringUtils.humanTimeDiff(TimeUnit.NANOSECONDS.toMillis(ns))); } try { preWrite(procId); @@ -232,7 +233,7 @@ public Integer call() throws IOException { return EXIT_FAILURE; } ProcedureTestingUtility.TestProcedure proc = - new ProcedureTestingUtility.TestProcedure(procId); + new ProcedureTestingUtility.TestProcedure(procId); proc.setData(SERIALIZED_STATE); store.insert(proc, null); store.update(proc); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java index 29d114af7212..278258fdfb12 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/TestProcedureTree.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -45,7 +45,7 @@ public class TestProcedureTree { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestProcedureTree.class); + HBaseClassTestRule.forClass(TestProcedureTree.class); public static final class TestProcedure extends Procedure { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java index eb6446de7ea5..b06eb4d3e839 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALLoaderPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +24,6 @@ import java.util.List; import java.util.Set; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.HBaseCommonTestingUtility; @@ -35,6 +34,7 @@ import org.apache.hadoop.hbase.procedure2.util.StringUtils; import org.apache.hadoop.hbase.util.AbstractHBaseTool; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; + import org.apache.hbase.thirdparty.org.apache.commons.cli.CommandLine; import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; @@ -42,14 +42,14 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { protected static final HBaseCommonTestingUtility UTIL = new HBaseCommonTestingUtility(); // Command line options and defaults. - public static int DEFAULT_NUM_PROCS = 1000000; // 1M - public static Option NUM_PROCS_OPTION = new Option("procs", true, - "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); + public static int DEFAULT_NUM_PROCS = 1000000; // 1M + public static Option NUM_PROCS_OPTION = + new Option("procs", true, "Total number of procedures. Default: " + DEFAULT_NUM_PROCS); public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); - public static int DEFAULT_STATE_SIZE = 1024; // 1KB + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + public static int DEFAULT_STATE_SIZE = 1024; // 1KB public static Option STATE_SIZE_OPTION = new Option("state_size", true, "Size of serialized state in bytes to write on update. Default: " + DEFAULT_STATE_SIZE + " bytes"); @@ -69,7 +69,8 @@ public class ProcedureWALLoaderPerformanceEvaluation extends AbstractHBaseTool { static byte[] serializedState; private static class LoadCounter implements ProcedureStore.ProcedureLoader { - public LoadCounter() {} + public LoadCounter() { + } @Override public void setMaxProcId(long maxProcId) { @@ -105,10 +106,10 @@ protected void processOptions(CommandLine cmd) { numWals = getOptionAsInt(cmd, NUM_WALS_OPTION.getOpt(), DEFAULT_NUM_WALS); int stateSize = getOptionAsInt(cmd, STATE_SIZE_OPTION.getOpt(), DEFAULT_STATE_SIZE); serializedState = new byte[stateSize]; - updatesPerProc = getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), - DEFAULT_UPDATES_PER_PROC); + updatesPerProc = + getOptionAsInt(cmd, UPDATES_PER_PROC_OPTION.getOpt(), DEFAULT_UPDATES_PER_PROC); deleteProcsFraction = getOptionAsDouble(cmd, DELETE_PROCS_FRACTION_OPTION.getOpt(), - DEFAULT_DELETE_PROCS_FRACTION); + DEFAULT_DELETE_PROCS_FRACTION); setupConf(); } @@ -140,7 +141,7 @@ private List shuffleProcWriteSequence() { Set toBeDeletedProcs = new HashSet<>(); // Add n + 1 entries of the proc id for insert + updates. If proc is chosen for delete, add // extra entry which is marked -ve in the loop after shuffle. 
- for (int procId = 1; procId <= numProcs; ++procId) { + for (int procId = 1; procId <= numProcs; ++procId) { procStatesSequence.addAll(Collections.nCopies(updatesPerProc + 1, procId)); if (ThreadLocalRandom.current().nextFloat() < deleteProcsFraction) { procStatesSequence.add(procId); @@ -161,7 +162,7 @@ private List shuffleProcWriteSequence() { private void writeWals() throws IOException { List procStates = shuffleProcWriteSequence(); - TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. + TestProcedure[] procs = new TestProcedure[numProcs + 1]; // 0 is not used. int numProcsPerWal = numWals > 0 ? procStates.size() / numWals : Integer.MAX_VALUE; long startTime = EnvironmentEdgeManager.currentTime(); long lastTime = startTime; @@ -179,8 +180,8 @@ private void writeWals() throws IOException { } if (i > 0 && i % numProcsPerWal == 0) { long currentTime = EnvironmentEdgeManager.currentTime(); - System.out.println("Forcing wall roll. Time taken on last WAL: " + - (currentTime - lastTime) / 1000.0f + " sec"); + System.out.println("Forcing wall roll. Time taken on last WAL: " + + (currentTime - lastTime) / 1000.0f + " sec"); store.rollWriterForTesting(); lastTime = currentTime; } @@ -203,8 +204,8 @@ private void storeRestart(ProcedureStore.ProcedureLoader loader) throws IOExcept System.out.println("Load time : " + (timeTaken / 1000.0f) + "sec"); System.out.println("******************************************"); System.out.println("Raw format for scripts"); - System.out.println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " - + "total_time_ms=%s]", + System.out + .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), serializedState.length, UPDATES_PER_PROC_OPTION.getOpt(), updatesPerProc, DELETE_PROCS_FRACTION_OPTION.getOpt(), deleteProcsFraction, NUM_WALS_OPTION.getOpt(), numWals, timeTaken)); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java index cab44264f295..ba7a1577875d 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/ProcedureWALPerformanceEvaluation.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -31,13 +31,13 @@ import org.apache.hbase.thirdparty.org.apache.commons.cli.Option; public class ProcedureWALPerformanceEvaluation - extends ProcedureStorePerformanceEvaluation { + extends ProcedureStorePerformanceEvaluation { // Command line options and defaults. public static int DEFAULT_NUM_WALS = 0; public static Option NUM_WALS_OPTION = new Option("wals", true, - "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + - " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); + "Number of WALs to write. If -ve or 0, uses " + WALProcedureStore.ROLL_THRESHOLD_CONF_KEY + + " conf to roll the logs. Default: " + DEFAULT_NUM_WALS); private long numProcsPerWal = Long.MAX_VALUE; // never roll wall based on this value. 
private int numWals; @@ -79,10 +79,10 @@ protected WALProcedureStore createProcedureStore(Path storeDir) throws IOExcepti @Override protected void printRawFormatResult(long timeTakenNs) { System.out - .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", - NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), stateSize, - SYNC_OPTION.getOpt(), syncType, NUM_THREADS_OPTION.getOpt(), numThreads, - NUM_WALS_OPTION.getOpt(), numWals, timeTakenNs)); + .println(String.format("RESULT [%s=%s, %s=%s, %s=%s, %s=%s, %s=%s, " + "total_time_ms=%s]", + NUM_PROCS_OPTION.getOpt(), numProcs, STATE_SIZE_OPTION.getOpt(), stateSize, + SYNC_OPTION.getOpt(), syncType, NUM_THREADS_OPTION.getOpt(), numThreads, + NUM_WALS_OPTION.getOpt(), numWals, timeTakenNs)); } @Override diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java index 9d897cf878c5..16f8293ab8fc 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestBitSetNode.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -35,7 +35,7 @@ public class TestBitSetNode { @ClassRule public static final HBaseClassTestRule CLASS_RULE = - HBaseClassTestRule.forClass(TestBitSetNode.class); + HBaseClassTestRule.forClass(TestBitSetNode.class); @Test public void testGetActiveMaxMinProcId() { diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java index e3064c9ab823..89697a7ffb68 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestProcedureStoreTracker.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,6 @@ import static org.junit.Assert.assertTrue; import java.util.concurrent.ThreadLocalRandom; - import org.apache.hadoop.hbase.HBaseClassTestRule; import org.apache.hadoop.hbase.procedure2.Procedure; import org.apache.hadoop.hbase.testclassification.MasterTests; @@ -33,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestProcedureStoreTracker { @ClassRule public static final HBaseClassTestRule CLASS_RULE = diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java index 3d46883f2deb..284a49bf58df 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestStressWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,6 +19,7 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; + import java.io.IOException; import java.util.Random; import java.util.concurrent.ThreadLocalRandom; @@ -43,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, MediumTests.class}) +@Category({ MasterTests.class, MediumTests.class }) public class TestStressWALProcedureStore { @ClassRule @@ -115,7 +116,8 @@ public void run() { for (int i = 0, nupdates = rand.nextInt(10); i <= nupdates; ++i) { try { Thread.sleep(0, rand.nextInt(15)); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { + } procStore.update(proc); } // Delete @@ -136,7 +138,8 @@ public void run() { assertEquals(1, procStore.getActiveLogs().size()); } - @Ignore @Test // REENABLE after merge of + @Ignore + @Test // REENABLE after merge of // https://github.com/google/protobuf/issues/2228#issuecomment-252058282 public void testEntrySizeLimit() throws Exception { final int NITEMS = 20; diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java index c8335eeb7d0e..a5a79a6a8530 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/store/wal/TestWALProcedureStore.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -61,7 +61,7 @@ import org.apache.hbase.thirdparty.com.google.protobuf.Int64Value; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestWALProcedureStore { @ClassRule public static final HBaseClassTestRule CLASS_RULE = @@ -161,7 +161,7 @@ public void testWalCleanerSequentialClean() throws Exception { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. + assertEquals(logs.size(), i + 2); // Extra 1 for current ongoing wal. } // Delete procedures in sequential order make sure that only the corresponding wal is deleted @@ -176,7 +176,6 @@ public void testWalCleanerSequentialClean() throws Exception { } } - // Test that wal cleaner doesn't create holes in wal files list i.e. it only deletes files if // they are in the starting of the list. @Test @@ -189,7 +188,7 @@ public void testWalCleanerNoHoles() throws Exception { procStore.insert(procs[i], null); procStore.rollWriterForTesting(); logs = procStore.getActiveLogs(); - assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. + assertEquals(i + 2, logs.size()); // Extra 1 for current ongoing wal. } for (int i = 1; i < procs.length; i++) { @@ -222,18 +221,18 @@ public void testWalCleanerUpdatesDontLeaveHoles() throws Exception { TestSequentialProcedure p2 = new TestSequentialProcedure(); procStore.insert(p1, null); procStore.insert(p2, null); - procStore.rollWriterForTesting(); // generates first log with p1 + p2 + procStore.rollWriterForTesting(); // generates first log with p1 + p2 ProcedureWALFile log1 = procStore.getActiveLogs().get(0); procStore.update(p2); - procStore.rollWriterForTesting(); // generates second log with p2 + procStore.rollWriterForTesting(); // generates second log with p2 ProcedureWALFile log2 = procStore.getActiveLogs().get(1); procStore.update(p2); - procStore.rollWriterForTesting(); // generates third log with p2 - procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. + procStore.rollWriterForTesting(); // generates third log with p2 + procStore.removeInactiveLogsForTesting(); // Shouldn't remove 2nd log. assertEquals(4, procStore.getActiveLogs().size()); procStore.update(p1); - procStore.rollWriterForTesting(); // generates fourth log with p1 - procStore.removeInactiveLogsForTesting(); // Should remove first two logs. + procStore.rollWriterForTesting(); // generates fourth log with p1 + procStore.removeInactiveLogsForTesting(); // Should remove first two logs. 
assertEquals(3, procStore.getActiveLogs().size()); assertFalse(procStore.getActiveLogs().contains(log1)); assertFalse(procStore.getActiveLogs().contains(log2)); @@ -418,8 +417,8 @@ public void testCorruptedTrailer() throws Exception { assertEquals(0, loader.getCorruptedCount()); } - private static void assertUpdated(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] updatedProcs, final int[] nonUpdatedProcs) { + private static void assertUpdated(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] updatedProcs, final int[] nonUpdatedProcs) { for (int index : updatedProcs) { long procId = procs[index].getProcId(); assertTrue("Procedure id : " + procId, tracker.isModified(procId)); @@ -430,17 +429,17 @@ private static void assertUpdated(final ProcedureStoreTracker tracker, } } - private static void assertDeleted(final ProcedureStoreTracker tracker, - final Procedure[] procs, final int[] deletedProcs, final int[] nonDeletedProcs) { + private static void assertDeleted(final ProcedureStoreTracker tracker, final Procedure[] procs, + final int[] deletedProcs, final int[] nonDeletedProcs) { for (int index : deletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.YES, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.YES, + tracker.isDeleted(procId)); } for (int index : nonDeletedProcs) { long procId = procs[index].getProcId(); - assertEquals("Procedure id : " + procId, - ProcedureStoreTracker.DeleteState.NO, tracker.isDeleted(procId)); + assertEquals("Procedure id : " + procId, ProcedureStoreTracker.DeleteState.NO, + tracker.isDeleted(procId)); } } @@ -451,13 +450,13 @@ public void testCorruptedTrailersRebuild() throws Exception { procs[i] = new TestSequentialProcedure(); } // Log State (I=insert, U=updated, D=delete) - // | log 1 | log 2 | log 3 | - // 0 | I, D | | | - // 1 | I | | | - // 2 | I | D | | - // 3 | I | U | | - // 4 | | I | D | - // 5 | | | I | + // | log 1 | log 2 | log 3 | + // 0 | I, D | | | + // 1 | I | | | + // 2 | I | D | | + // 3 | I | U | | + // 4 | | I | D | + // 5 | | | I | procStore.insert(procs[0], null); procStore.insert(procs[1], null); procStore.insert(procs[2], null); @@ -485,7 +484,7 @@ public void testCorruptedTrailersRebuild() throws Exception { htu.getConfiguration().setBoolean(WALProcedureStore.EXEC_WAL_CLEANUP_ON_LOAD_CONF_KEY, false); final LoadCounter loader = new LoadCounter(); storeRestart(loader); - assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 + assertEquals(3, loader.getLoadedCount()); // procs 1, 3 and 5 assertEquals(0, loader.getCorruptedCount()); // Check the Trackers @@ -493,13 +492,16 @@ public void testCorruptedTrailersRebuild() throws Exception { LOG.info("WALs " + walFiles); assertEquals(4, walFiles.size()); LOG.info("Checking wal " + walFiles.get(0)); - assertUpdated(walFiles.get(0).getTracker(), procs, new int[]{0, 1, 2, 3}, new int[] {4, 5}); + assertUpdated(walFiles.get(0).getTracker(), procs, new int[] { 0, 1, 2, 3 }, + new int[] { 4, 5 }); LOG.info("Checking wal " + walFiles.get(1)); - assertUpdated(walFiles.get(1).getTracker(), procs, new int[]{2, 3, 4}, new int[] {0, 1, 5}); + assertUpdated(walFiles.get(1).getTracker(), procs, new int[] { 2, 3, 4 }, + new int[] { 0, 1, 5 }); LOG.info("Checking wal " + walFiles.get(2)); - assertUpdated(walFiles.get(2).getTracker(), procs, new int[]{4, 5}, new int[] {0, 1, 2, 3}); + 
assertUpdated(walFiles.get(2).getTracker(), procs, new int[] { 4, 5 }, + new int[] { 0, 1, 2, 3 }); LOG.info("Checking global tracker "); - assertDeleted(procStore.getStoreTracker(), procs, new int[]{0, 2, 4}, new int[] {1, 3, 5}); + assertDeleted(procStore.getStoreTracker(), procs, new int[] { 0, 2, 4 }, new int[] { 1, 3, 5 }); } @Test @@ -531,17 +533,17 @@ public void testCorruptedProcedures() throws Exception { // Insert root-procedures TestProcedure[] rootProcs = new TestProcedure[10]; for (int i = 1; i <= rootProcs.length; i++) { - rootProcs[i-1] = new TestProcedure(i, 0); - procStore.insert(rootProcs[i-1], null); - rootProcs[i-1].addStackId(0); - procStore.update(rootProcs[i-1]); + rootProcs[i - 1] = new TestProcedure(i, 0); + procStore.insert(rootProcs[i - 1], null); + rootProcs[i - 1].addStackId(0); + procStore.update(rootProcs[i - 1]); } // insert root-child txn procStore.rollWriterForTesting(); for (int i = 1; i <= rootProcs.length; i++) { TestProcedure b = new TestProcedure(rootProcs.length + i, i); - rootProcs[i-1].addStackId(1); - procStore.insert(rootProcs[i-1], new Procedure[] { b }); + rootProcs[i - 1].addStackId(1); + procStore.insert(rootProcs[i - 1], new Procedure[] { b }); } // insert child updates procStore.rollWriterForTesting(); @@ -629,20 +631,19 @@ public void testFileNotFoundDuringLeaseRecovery() throws IOException { assertEquals(procs.length + 1, status.length); // simulate another active master removing the wals - procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, - new LeaseRecovery() { - private int count = 0; - - @Override - public void recoverFileLease(FileSystem fs, Path path) throws IOException { - if (++count <= 2) { - fs.delete(path, false); - LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); - throw new FileNotFoundException("test file not found " + path); - } - LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + procStore = new WALProcedureStore(htu.getConfiguration(), logDir, null, new LeaseRecovery() { + private int count = 0; + + @Override + public void recoverFileLease(FileSystem fs, Path path) throws IOException { + if (++count <= 2) { + fs.delete(path, false); + LOG.debug("Simulate FileNotFound at count=" + count + " for " + path); + throw new FileNotFoundException("test file not found " + path); } - }); + LOG.debug("Simulate recoverFileLease() at count=" + count + " for " + path); + } + }); final LoadCounter loader = new LoadCounter(); procStore.start(PROCEDURE_STORE_SLOTS); @@ -656,7 +657,7 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { @Test public void testLogFileAlreadyExists() throws IOException { - final boolean[] tested = {false}; + final boolean[] tested = { false }; WALProcedureStore mStore = Mockito.spy(procStore); Answer ans = new Answer() { @@ -806,20 +807,19 @@ public void recoverFileLease(FileSystem fs, Path path) throws IOException { }); } - private LoadCounter restartAndAssert(long maxProcId, long runnableCount, - int completedCount, int corruptedCount) throws Exception { - return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, - runnableCount, completedCount, corruptedCount); + private LoadCounter restartAndAssert(long maxProcId, long runnableCount, int completedCount, + int corruptedCount) throws Exception { + return ProcedureTestingUtility.storeRestartAndAssert(procStore, maxProcId, runnableCount, + completedCount, corruptedCount); } - private void corruptLog(final FileStatus logFile, final long dropBytes) - 
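The reindented anonymous LeaseRecovery above simulates another master deleting WAL files: the first two recoverFileLease() calls delete the file and throw FileNotFoundException, later calls succeed. A JDK-only sketch of that counting-stub pattern, using a hypothetical single-method Recovery interface instead of the HBase/Hadoop types:

    import java.io.FileNotFoundException;
    import java.io.IOException;

    // Hypothetical interface standing in for the LeaseRecovery callback shown above;
    // only the "fail the first N calls" stub pattern is illustrated.
    public class CountingStubDemo {
      interface Recovery {
        void recover(String path) throws IOException;
      }

      public static void main(String[] args) throws IOException {
        Recovery recovery = new Recovery() {
          private int count = 0;

          @Override
          public void recover(String path) throws IOException {
            if (++count <= 2) {
              // first two calls simulate the file disappearing underneath us
              throw new FileNotFoundException("test file not found " + path);
            }
            System.out.println("recovered lease at count=" + count + " for " + path);
          }
        };

        for (int i = 0; i < 3; i++) {
          try {
            recovery.recover("/wal/log-" + i);
          } catch (FileNotFoundException e) {
            System.out.println("simulated failure: " + e.getMessage());
          }
        }
      }
    }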
throws IOException { + private void corruptLog(final FileStatus logFile, final long dropBytes) throws IOException { assertTrue(logFile.getLen() > dropBytes); - LOG.debug("corrupt log " + logFile.getPath() + - " size=" + logFile.getLen() + " drop=" + dropBytes); + LOG.debug( + "corrupt log " + logFile.getPath() + " size=" + logFile.getLen() + " drop=" + dropBytes); Path tmpPath = new Path(testDir, "corrupted.log"); InputStream in = fs.open(logFile.getPath()); - OutputStream out = fs.create(tmpPath); + OutputStream out = fs.create(tmpPath); IOUtils.copyBytes(in, out, logFile.getLen() - dropBytes, true); if (!fs.rename(tmpPath, logFile.getPath())) { throw new IOException("Unable to rename"); @@ -856,8 +856,7 @@ protected boolean abort(Void env) { } @Override - protected void serializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void serializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value.Builder builder = Int64Value.newBuilder().setValue(procId); @@ -866,8 +865,7 @@ protected void serializeStateData(ProcedureStateSerializer serializer) } @Override - protected void deserializeStateData(ProcedureStateSerializer serializer) - throws IOException { + protected void deserializeStateData(ProcedureStateSerializer serializer) throws IOException { long procId = getProcId(); if (procId % 2 == 0) { Int64Value value = serializer.deserialize(Int64Value.class); diff --git a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java index 0d494fcdd6b3..93b85c8c2c86 100644 --- a/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java +++ b/hbase-procedure/src/test/java/org/apache/hadoop/hbase/procedure2/util/TestDelayedUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
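The serializeStateData/deserializeStateData hunks above round-trip the procedure id through the protobuf Int64Value wrapper (for even ids) via the ProcedureStateSerializer. A minimal sketch of that round trip against plain protobuf-java; the test itself uses the hbase-thirdparty shaded copy of the same class:

    import com.google.protobuf.Int64Value;
    import com.google.protobuf.InvalidProtocolBufferException;

    // Serializes a procedure id into the protobuf Int64Value well-known type and
    // parses it back, mirroring what the reflowed state-data methods delegate to
    // the serializer. Assumes protobuf-java on the classpath.
    public class Int64ValueRoundTrip {
      public static void main(String[] args) throws InvalidProtocolBufferException {
        long procId = 42L;
        byte[] wire = Int64Value.newBuilder().setValue(procId).build().toByteArray();
        Int64Value parsed = Int64Value.parseFrom(wire);
        System.out.println("round-tripped procId = " + parsed.getValue());
      }
    }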
See the NOTICE file * distributed with this work for additional information @@ -28,7 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -@Category({MasterTests.class, SmallTests.class}) +@Category({ MasterTests.class, SmallTests.class }) public class TestDelayedUtil { @ClassRule @@ -50,9 +50,8 @@ public void testDelayedContainerEquals() { ZeroDelayContainer o1cb = new ZeroDelayContainer<>(o1); ZeroDelayContainer o2c = new ZeroDelayContainer<>(o2); - ZeroDelayContainer[] items = new ZeroDelayContainer[] { - lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, - }; + ZeroDelayContainer[] items = + new ZeroDelayContainer[] { lnull, l10a, l10b, l15, onull, o1ca, o1cb, o2c, }; assertContainersEquals(lnull, items, lnull, onull); assertContainersEquals(l10a, items, l10a, l10b); @@ -75,8 +74,8 @@ private void assertContainersEquals(final ZeroDelayContainer src, } } boolean isMatching = src.equals(items[i]); - assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), - shouldMatch, isMatching); + assertEquals(src.getObject() + " unexpectedly match " + items[i].getObject(), shouldMatch, + isMatching); } } diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml index 8b269c8644ea..8c116b910ea9 100644 --- a/hbase-protocol-shaded/pom.xml +++ b/hbase-protocol-shaded/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -36,13 +36,28 @@ --> 3.17.3 + + + + + org.apache.hbase.thirdparty + hbase-shaded-protobuf + + + junit + junit + test + + org.apache.maven.plugins maven-source-plugin - + maven-assembly-plugin @@ -56,10 +71,10 @@ secondPartTestsExecution - test test + test true @@ -72,10 +87,10 @@ compile-protoc - generate-sources compile + generate-sources com.google.protobuf:protoc:${internal.protobuf.version}:exe:${os.detected.classifier} true @@ -95,48 +110,48 @@ com.google.code.maven-replacer-plugin replacer 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + true + + + ([^\.])com.google.protobuf + $1org.apache.hbase.thirdparty.com.google.protobuf + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + - process-sources replace + process-sources - - ${basedir}/target/generated-sources/ - - **/*.java - - - true - - - ([^\.])com.google.protobuf - $1org.apache.hbase.thirdparty.com.google.protobuf - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - org.apache.maven.plugins maven-shade-plugin - package shade + package true true @@ -187,21 +202,6 @@ - - - - - org.apache.hbase.thirdparty - hbase-shaded-protobuf - - - junit - junit - test - - @@ -260,9 +260,7 @@ - - com.google.code.maven-replacer-plugin - + com.google.code.maven-replacer-plugin replacer [1.5.3,) @@ -271,7 +269,7 @@ - false + false diff --git a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java index f8cef893d7d7..9f839d3f6ecf 100644 --- a/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java +++ b/hbase-protocol-shaded/src/main/java/org/apache/hadoop/hbase/util/ForeignExceptionUtil.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license 
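The hbase-protocol-shaded pom hunks above mostly reshuffle where the replacer <configuration> lives; the two tokens themselves rewrite the generated protobuf sources so that com.google.protobuf references point at the shaded org.apache.hbase.thirdparty package and every generated class is stamped @javax.annotation.Generated("proto"). A JDK-only sketch applying those same patterns to one sample line (the sample class name is made up; the real build runs the maven-replacer-plugin over target/generated-sources):

    // Applies the two replacer patterns from the pom above to one sample line of
    // generated code. The patterns are copied as-is from the pom, so the dots are
    // unescaped regex dots, exactly as the plugin is configured.
    public class RelocationRegexDemo {
      public static void main(String[] args) {
        String line = "public final class FooProtos extends com.google.protobuf.GeneratedMessageV3 {";

        // token: ([^\.])com.google.protobuf  ->  $1org.apache.hbase.thirdparty.com.google.protobuf
        String relocated = line.replaceAll("([^\\.])com.google.protobuf",
          "$1org.apache.hbase.thirdparty.com.google.protobuf");

        // token: (public)(\W+static)?(\W+final)?(\W+class)
        //   ->   @javax.annotation.Generated("proto") $1$2$3$4
        String annotated = relocated.replaceAll("(public)(\\W+static)?(\\W+final)?(\\W+class)",
          "@javax.annotation.Generated(\"proto\") $1$2$3$4");

        System.out.println(annotated);
      }
    }

The third token in the pom, (@javax.annotation.Generated\("proto"\) ){2} -> $1, simply de-duplicates the annotation if the replacement runs twice over the same tree.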
agreements. See the NOTICE file * distributed with this work for additional information @@ -22,19 +22,20 @@ import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; + import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.ForeignExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.GenericExceptionMessage; import org.apache.hadoop.hbase.shaded.protobuf.generated.ErrorHandlingProtos.StackTraceElementMessage; /** - * Helper to convert Exceptions and StackTraces from/to protobuf. - * (see ErrorHandling.proto for the internal of the proto messages) + * Helper to convert Exceptions and StackTraces from/to protobuf. (see ErrorHandling.proto for the + * internal of the proto messages) */ @InterfaceAudience.Private public final class ForeignExceptionUtil { - private ForeignExceptionUtil() { } + private ForeignExceptionUtil() { + } public static Exception toException(final ForeignExceptionMessage eem) { Exception re; @@ -58,7 +59,7 @@ public static IOException toIOException(final ForeignExceptionMessage eem) { private static T createException(final Class clazz, final ForeignExceptionMessage eem) throws ClassNotFoundException, NoSuchMethodException, - InstantiationException, IllegalAccessException, InvocationTargetException { + InstantiationException, IllegalAccessException, InvocationTargetException { final GenericExceptionMessage gem = eem.getGenericException(); final Class realClass = Class.forName(gem.getClassName()); final Class cls = realClass.asSubclass(clazz); @@ -127,8 +128,7 @@ public static List toProtoStackTraceElement(StackTrace } /** - * Unwind a serialized array of {@link StackTraceElementMessage}s to a - * {@link StackTraceElement}s. + * Unwind a serialized array of {@link StackTraceElementMessage}s to a {@link StackTraceElement}s. * @param traceList list that was serialized * @return the deserialized list or null if it couldn't be unwound (e.g. wasn't set on * the sender). @@ -140,10 +140,8 @@ public static StackTraceElement[] toStackTrace(List tr StackTraceElement[] trace = new StackTraceElement[traceList.size()]; for (int i = 0; i < traceList.size(); i++) { StackTraceElementMessage elem = traceList.get(i); - trace[i] = new StackTraceElement( - elem.getDeclaringClass(), elem.getMethodName(), - elem.hasFileName() ? elem.getFileName() : null, - elem.getLineNumber()); + trace[i] = new StackTraceElement(elem.getDeclaringClass(), elem.getMethodName(), + elem.hasFileName() ? 
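The createException hunk above is only re-indented, but the pattern it implements is worth seeing in isolation: resolve the remote exception class by name, check it is a subtype of the requested type, and invoke its single String-message constructor reflectively. A JDK-only sketch of that pattern; the class name and message in main are made up:

    import java.lang.reflect.Constructor;

    // JDK-only sketch of the reflective construction createException() performs.
    public class ReflectiveExceptionDemo {
      static <T extends Exception> T create(Class<T> clazz, String className, String message)
          throws ReflectiveOperationException {
        Class<?> realClass = Class.forName(className);
        Class<? extends T> cls = realClass.asSubclass(clazz);
        Constructor<? extends T> ctor = cls.getConstructor(String.class);
        return ctor.newInstance(message);
      }

      public static void main(String[] args) throws ReflectiveOperationException {
        Exception e = create(Exception.class, "java.io.IOException", "remote side failed");
        System.out.println(e.getClass().getName() + ": " + e.getMessage());
      }
    }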
elem.getFileName() : null, elem.getLineNumber()); } return trace; } diff --git a/hbase-protocol/pom.xml b/hbase-protocol/pom.xml index b184ef1b5035..9ec28bf4ac53 100644 --- a/hbase-protocol/pom.xml +++ b/hbase-protocol/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -32,6 +32,17 @@ true + + + + com.google.protobuf + protobuf-java + + + org.slf4j + slf4j-api + + @@ -52,10 +63,10 @@ secondPartTestsExecution - test test + test true @@ -68,10 +79,10 @@ compile-protoc - generate-sources compile + generate-sources @@ -83,45 +94,34 @@ com.google.code.maven-replacer-plugin replacer 1.5.3 + + ${basedir}/target/generated-sources/ + + **/*.java + + + + (public)(\W+static)?(\W+final)?(\W+class) + @javax.annotation.Generated("proto") $1$2$3$4 + + + + (@javax.annotation.Generated\("proto"\) ){2} + $1 + + + - generate-sources replace + generate-sources - - ${basedir}/target/generated-sources/ - - **/*.java - - - - (public)(\W+static)?(\W+final)?(\W+class) - @javax.annotation.Generated("proto") $1$2$3$4 - - - - (@javax.annotation.Generated\("proto"\) ){2} - $1 - - - - - - - com.google.protobuf - protobuf-java - - - org.slf4j - slf4j-api - - @@ -181,9 +181,7 @@ - - com.google.code.maven-replacer-plugin - + com.google.code.maven-replacer-plugin replacer [1.5.3,) @@ -191,7 +189,7 @@ - + diff --git a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java index f10d30f9d4b1..342a38a7c933 100644 --- a/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java +++ b/hbase-protocol/src/main/java/com/google/protobuf/HBaseZeroCopyByteString.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,18 +15,16 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.google.protobuf; // This is a lie. +package com.google.protobuf; // This is a lie. import org.apache.yetus.audience.InterfaceAudience; /** * Helper class to extract byte arrays from {@link ByteString} without copy. *

    - * Without this protobufs would force us to copy every single byte array out - * of the objects de-serialized from the wire (which already do one copy, on - * top of the copies the JVM does to go from kernel buffer to C buffer and - * from C buffer to JVM buffer). - * + * Without this protobufs would force us to copy every single byte array out of the objects + * de-serialized from the wire (which already do one copy, on top of the copies the JVM does to go + * from kernel buffer to C buffer and from C buffer to JVM buffer). * @since 0.96.1 */ @InterfaceAudience.Private @@ -63,15 +61,15 @@ public static ByteString wrap(final byte[] array, int offset, int length) { /** * Extracts the byte array from the given {@link ByteString} without copy. - * @param buf A buffer from which to extract the array. This buffer must be - * actually an instance of a {@code LiteralByteString}. + * @param buf A buffer from which to extract the array. This buffer must be actually an instance + * of a {@code LiteralByteString}. * @return byte[] representation */ public static byte[] zeroCopyGetBytes(final ByteString buf) { if (buf instanceof LiteralByteString) { return ((LiteralByteString) buf).bytes; } - throw new UnsupportedOperationException("Need a LiteralByteString, got a " - + buf.getClass().getName()); + throw new UnsupportedOperationException( + "Need a LiteralByteString, got a " + buf.getClass().getName()); } } diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java index 65f1cc672143..b6368036a47e 100644 --- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java +++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/util/ByteStringer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,13 +17,12 @@ */ package org.apache.hadoop.hbase.util; +import com.google.protobuf.ByteString; +import com.google.protobuf.HBaseZeroCopyByteString; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.protobuf.ByteString; -import com.google.protobuf.HBaseZeroCopyByteString; - /** * Hack to workaround HBASE-10304 issue that keeps bubbling up when a mapreduce context. */ @@ -41,7 +40,7 @@ public class ByteStringer { // because it makes a copy of the passed in array. static { try { - HBaseZeroCopyByteString.wrap(new byte [0]); + HBaseZeroCopyByteString.wrap(new byte[0]); } catch (IllegalAccessError iae) { USE_ZEROCOPYBYTESTRING = false; LOG.debug("Failed to classload HBaseZeroCopyByteString: " + iae.toString()); @@ -56,14 +55,15 @@ private ByteStringer() { * Wraps a byte array in a {@link ByteString} without copying it. */ public static ByteString wrap(final byte[] array) { - return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array): ByteString.copyFrom(array); + return USE_ZEROCOPYBYTESTRING ? HBaseZeroCopyByteString.wrap(array) + : ByteString.copyFrom(array); } /** * Wraps a subset of a byte array in a {@link ByteString} without copying it. */ public static ByteString wrap(final byte[] array, int offset, int length) { - return USE_ZEROCOPYBYTESTRING? HBaseZeroCopyByteString.wrap(array, offset, length): - ByteString.copyFrom(array, offset, length); + return USE_ZEROCOPYBYTESTRING ? 
HBaseZeroCopyByteString.wrap(array, offset, length) + : ByteString.copyFrom(array, offset, length); } } diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml index 593a79ae7385..67087d453ac3 100644 --- a/hbase-replication/pom.xml +++ b/hbase-replication/pom.xml @@ -1,6 +1,5 @@ - - + + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -31,27 +30,6 @@ Apache HBase - Replication HBase Replication Support - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - net.revelc.code - warbucks-maven-plugin - - - - org.apache.hbase.thirdparty @@ -152,14 +130,36 @@ + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + net.revelc.code + warbucks-maven-plugin + + + + hadoop-2.0 - - !hadoop.profile + + + !hadoop.profile @@ -251,8 +251,7 @@ lifecycle-mapping - - + diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java index 83421600aa0d..6dba30a34c04 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java index 5c21e1e023ce..36b958d2fa2e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java index f1103b268e98..a70a62309a71 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,7 +20,6 @@ import java.util.List; import java.util.Map; import java.util.Set; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.TableName; @@ -37,8 +36,7 @@ public interface ReplicationPeer { */ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) enum PeerState { - ENABLED, - DISABLED + ENABLED, DISABLED } /** @@ -99,7 +97,7 @@ default boolean isPeerEnabled() { /** * @deprecated since 2.1.0 and will be removed in 4.0.0. Use - * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. + * {@link #registerPeerConfigListener(ReplicationPeerConfigListener)} instead. 
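The ByteStringer hunks just above reformat the ternaries around a probe-and-fallback pattern: a static initializer calls HBaseZeroCopyByteString.wrap(new byte[0]) once, and if that fails with IllegalAccessError every later wrap() falls back to ByteString.copyFrom(). A JDK-only sketch of the same pattern, with a hypothetical FastPath class standing in for the zero-copy wrapper:

    // JDK-only sketch of the probe-in-static-initializer pattern used above: try the
    // fast path once at class load, remember whether it worked, fall back otherwise.
    public class ProbeAndFallbackDemo {
      static class FastPath {
        static byte[] wrap(byte[] array) {
          // throwing here simulates the classloader mismatch the real probe guards against
          if (Boolean.getBoolean("demo.breakFastPath")) {
            throw new IllegalAccessError("simulated classloader mismatch");
          }
          return array; // a real zero-copy wrapper returns a view over the same array
        }
      }

      private static final boolean USE_FAST_PATH;
      static {
        boolean ok = true;
        try {
          FastPath.wrap(new byte[0]); // probe once, like HBaseZeroCopyByteString.wrap(new byte[0])
        } catch (IllegalAccessError iae) {
          ok = false;
        }
        USE_FAST_PATH = ok;
      }

      static byte[] wrap(byte[] array) {
        return USE_FAST_PATH ? FastPath.wrap(array) : array.clone(); // clone() stands in for copyFrom
      }

      public static void main(String[] args) {
        byte[] in = { 1, 2, 3 };
        System.out.println("fast path: " + USE_FAST_PATH + ", same array: " + (wrap(in) == in));
      }
    }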
* @see #registerPeerConfigListener(ReplicationPeerConfigListener) * @see HBASE-19573 */ diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java index d4d8023ead76..d0bacda6d496 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerConfigListener.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.replication; import org.apache.hadoop.hbase.HBaseInterfaceAudience; @@ -24,8 +22,8 @@ @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.REPLICATION) public interface ReplicationPeerConfigListener { - /** Callback method for when users update the ReplicationPeerConfig for this peer - * + /** + * Callback method for when users update the ReplicationPeerConfig for this peer * @param rpc The updated ReplicationPeerConfig */ void peerConfigUpdated(ReplicationPeerConfig rpc); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java index 1adda02e6318..a2aa0e1faf12 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,7 +18,6 @@ package org.apache.hadoop.hbase.replication; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java index ea5a7ac4c4ac..23c6eb5a2e9e 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeers.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java index cd65f9b3a891..959d5456a95a 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,20 +17,18 @@ */ package org.apache.hadoop.hbase.replication; - import java.util.ArrayList; import java.util.Collections; import java.util.List; - +import org.apache.hadoop.hbase.ServerName; import org.apache.yetus.audience.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hbase.ServerName; /** - * This class is responsible for the parsing logic for a queue id representing a queue. - * It will extract the peerId if it's recovered as well as the dead region servers - * that were part of the queue's history. + * This class is responsible for the parsing logic for a queue id representing a queue. It will + * extract the peerId if it's recovered as well as the dead region servers that were part of the + * queue's history. */ @InterfaceAudience.Private public class ReplicationQueueInfo { @@ -44,8 +41,8 @@ public class ReplicationQueueInfo { private List deadRegionServers = new ArrayList<>(); /** - * The passed queueId will be either the id of the peer or the handling story of that queue - * in the form of id-servername-* + * The passed queueId will be either the id of the peer or the handling story of that queue in the + * form of id-servername-* */ public ReplicationQueueInfo(String queueId) { this.queueId = queueId; @@ -63,10 +60,10 @@ public ReplicationQueueInfo(String queueId) { * "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-... */ - private static void - extractDeadServersFromZNodeString(String deadServerListStr, List result) { + private static void extractDeadServersFromZNodeString(String deadServerListStr, + List result) { - if(deadServerListStr == null || result == null || deadServerListStr.isEmpty()) return; + if (deadServerListStr == null || result == null || deadServerListStr.isEmpty()) return; // valid server name delimiter "-" has to be after "," in a server name int seenCommaCnt = 0; @@ -75,32 +72,32 @@ public ReplicationQueueInfo(String queueId) { for (int i = 0; i < len; i++) { switch (deadServerListStr.charAt(i)) { - case ',': - seenCommaCnt += 1; - break; - case '-': - if(seenCommaCnt>=2) { - if (i > startIndex) { - String serverName = deadServerListStr.substring(startIndex, i); - if(ServerName.isFullServerName(serverName)){ - result.add(ServerName.valueOf(serverName)); - } else { - LOG.error("Found invalid server name:" + serverName); + case ',': + seenCommaCnt += 1; + break; + case '-': + if (seenCommaCnt >= 2) { + if (i > startIndex) { + String serverName = deadServerListStr.substring(startIndex, i); + if (ServerName.isFullServerName(serverName)) { + result.add(ServerName.valueOf(serverName)); + } else { + LOG.error("Found invalid server name:" + serverName); + } + startIndex = i + 1; } - startIndex = i + 1; + seenCommaCnt = 0; } - seenCommaCnt = 0; - } - break; - default: - break; + break; + default: + break; } } // add tail - if(startIndex < len - 1){ + if (startIndex < len - 1) { String serverName = deadServerListStr.substring(startIndex, len); - if(ServerName.isFullServerName(serverName)){ + if (ServerName.isFullServerName(serverName)) { result.add(ServerName.valueOf(serverName)); } else { LOG.error("Found invalid server name at the end:" + serverName); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java 
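The re-indented switch in extractDeadServersFromZNodeString above encodes a parsing rule worth spelling out: in a recovered queue id such as 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-..., a '-' only terminates a server name once two commas have been seen, so hostnames that themselves contain '-' are not split. A JDK-only sketch of that rule; the isFullServerName check is simplified to "three comma-separated parts":

    import java.util.ArrayList;
    import java.util.List;

    // JDK-only sketch of the comma-counting rule used above. looksLikeServerName() is a
    // simplified stand-in for ServerName.isFullServerName().
    public class DeadServerParseDemo {
      static boolean looksLikeServerName(String s) {
        return s.split(",").length == 3; // host,port,startcode
      }

      static List<String> parse(String deadServerListStr) {
        List<String> result = new ArrayList<>();
        int seenCommaCnt = 0;
        int startIndex = 0;
        int len = deadServerListStr.length();
        for (int i = 0; i < len; i++) {
          char c = deadServerListStr.charAt(i);
          if (c == ',') {
            seenCommaCnt += 1;
          } else if (c == '-' && seenCommaCnt >= 2) {
            if (i > startIndex) {
              String candidate = deadServerListStr.substring(startIndex, i);
              if (looksLikeServerName(candidate)) {
                result.add(candidate);
              }
              startIndex = i + 1;
            }
            seenCommaCnt = 0;
          }
        }
        if (startIndex < len - 1 && looksLikeServerName(deadServerListStr.substring(startIndex))) {
          result.add(deadServerListStr.substring(startIndex));
        }
        return result;
      }

      public static void main(String[] args) {
        System.out.println(parse("ip-10-46-221-101.ec2.internal,52170,1364333181125-"
          + "ip-10-46-221-102.ec2.internal,52170,1364333181125"));
      }
    }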
b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java index 59278e9807d5..90f380e2367a 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +21,6 @@ import java.util.Map; import java.util.Set; import java.util.SortedSet; - import org.apache.hadoop.fs.Path; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.util.Pair; diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java index 462cfedd0a04..429b44bdb542 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationStorageFactory.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java index fb2b0d517d82..aa88742ae9a6 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationUtils.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -117,11 +117,11 @@ public static boolean isNamespacesAndTableCFsEqual(ReplicationPeerConfig rpc1, return false; } if (rpc1.replicateAllUserTables()) { - return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) && - isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); + return isNamespacesEqual(rpc1.getExcludeNamespaces(), rpc2.getExcludeNamespaces()) + && isTableCFsEqual(rpc1.getExcludeTableCFsMap(), rpc2.getExcludeTableCFsMap()); } else { - return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) && - isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); + return isNamespacesEqual(rpc1.getNamespaces(), rpc2.getNamespaces()) + && isTableCFsEqual(rpc1.getTableCFsMap(), rpc2.getTableCFsMap()); } } @@ -135,8 +135,8 @@ public static boolean isReplicationForBulkLoadDataEnabled(final Configuration c) } /** - * @deprecated Will be removed in HBase 3. - * Use {@link ReplicationPeerConfig#needToReplicate(TableName)} instead. + * @deprecated Will be removed in HBase 3. Use + * {@link ReplicationPeerConfig#needToReplicate(TableName)} instead. 
* @param peerConfig configuration for the replication peer cluster * @param tableName name of the table * @return true if the table need replicate to the peer cluster diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java index 4b3b70220c39..37ef439c1c1b 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.replication.ReplicationPeerConfigUtil; import org.apache.hadoop.hbase.exceptions.DeserializationException; -import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; import org.apache.hadoop.hbase.zookeeper.ZKUtil; import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; @@ -31,6 +30,8 @@ import org.apache.yetus.audience.InterfaceAudience; import org.apache.zookeeper.KeeperException; +import org.apache.hadoop.hbase.shaded.protobuf.generated.ReplicationProtos; + /** * ZK based replication peer storage. */ @@ -45,9 +46,9 @@ public class ZKReplicationPeerStorage extends ZKReplicationStorageBase public static final String PEERS_STATE_ZNODE_DEFAULT = "peer-state"; public static final byte[] ENABLED_ZNODE_BYTES = - toByteArray(ReplicationProtos.ReplicationState.State.ENABLED); + toByteArray(ReplicationProtos.ReplicationState.State.ENABLED); public static final byte[] DISABLED_ZNODE_BYTES = - toByteArray(ReplicationProtos.ReplicationState.State.DISABLED); + toByteArray(ReplicationProtos.ReplicationState.State.DISABLED); /** * The name of the znode that contains the replication status of a remote slave (i.e. peer) diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java index c51bdfcc283e..8c99855a2552 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -162,10 +162,10 @@ String getSerialReplicationRegionPeerNode(String encodedRegionName, String peerI "Invalid encoded region name: " + encodedRegionName + ", length should be 32."); } return new StringBuilder(regionsZNode).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) - .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) - .toString(); + .append(encodedRegionName, 0, 2).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 2, 4).append(ZNodePaths.ZNODE_PATH_SEPARATOR) + .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId) + .toString(); } @Override @@ -198,8 +198,8 @@ public void removeWAL(ServerName serverName, String queueId, String fileName) } catch (NoNodeException e) { LOG.warn("{} already deleted when removing log", fileNode); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to remove wal from queue (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } } @@ -350,12 +350,12 @@ public void removeLastSequenceIds(String peerId, List encodedRegionNames throws ReplicationException { try { List listOfOps = - encodedRegionNames.stream().map(n -> getSerialReplicationRegionPeerNode(n, peerId)) - .map(ZKUtilOp::deleteNodeFailSilent).collect(Collectors.toList()); + encodedRegionNames.stream().map(n -> getSerialReplicationRegionPeerNode(n, peerId)) + .map(ZKUtilOp::deleteNodeFailSilent).collect(Collectors.toList()); ZKUtil.multiOrSequential(zookeeper, listOfOps, true); } catch (KeeperException e) { - throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + - ", encodedRegionNames.size=" + encodedRegionNames.size(), e); + throw new ReplicationException("Failed to remove last sequence ids, peerId=" + peerId + + ", encodedRegionNames.size=" + encodedRegionNames.size(), e); } } @@ -366,14 +366,14 @@ public long getWALPosition(ServerName serverName, String queueId, String fileNam try { bytes = ZKUtil.getData(zookeeper, getFileNode(serverName, queueId, fileName)); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Failed to get log position (serverName=" + serverName + - ", queueId=" + queueId + ", fileName=" + fileName + ")", e); + throw new ReplicationException("Failed to get log position (serverName=" + serverName + + ", queueId=" + queueId + ", fileName=" + fileName + ")", e); } try { return ZKUtil.parseWALPositionFrom(bytes); } catch (DeserializationException de) { - LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", - serverName, queueId, fileName); + LOG.warn("Failed parse log position (serverName={}, queueId={}, fileName={})", serverName, + queueId, fileName); } // if we can not parse the position, start at the beginning of the wal file again return 0; @@ -391,10 +391,8 @@ public Pair> claimQueue(ServerName sourceServerName, S try { ZKUtil.createWithParents(zookeeper, getRsNode(destServerName)); } catch (KeeperException e) { - throw new ReplicationException( - "Claim queue queueId=" + queueId + " from " + sourceServerName + " to " + destServerName + - " failed when creating the node for " 
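The reflowed builder chain in getSerialReplicationRegionPeerNode above splits the 32-character encoded region name into two 2-character prefixes and the 28-character remainder, so region znodes fan out into small buckets, then appends "-<peerId>". A JDK-only sketch that reproduces the path for a sample name (the base znode, the '/' separator, and the sample 32-character name are assumptions for illustration):

    // Builds <regionsZNode>/<2 chars>/<2 chars>/<rest>-<peerId>, as above.
    public class SerialReplicationNodeDemo {
      public static void main(String[] args) {
        String regionsZNode = "/hbase/replication/regions";
        String encodedRegionName = "d41d8cd98f00b204e9800998ecf8427e"; // 32 hex chars
        String peerId = "1";

        if (encodedRegionName.length() != 32) {
          throw new IllegalArgumentException(
            "Invalid encoded region name: " + encodedRegionName + ", length should be 32.");
        }
        String node = new StringBuilder(regionsZNode).append('/')
          .append(encodedRegionName, 0, 2).append('/')
          .append(encodedRegionName, 2, 4).append('/')
          .append(encodedRegionName, 4, encodedRegionName.length()).append("-").append(peerId)
          .toString();
        // prints /hbase/replication/regions/d4/1d/8cd98f00b204e9800998ecf8427e-1
        System.out.println(node);
      }
    }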
+ destServerName, - e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed when creating the node for " + destServerName, e); } String newQueueId = queueId + "-" + sourceServerName; try { @@ -440,11 +438,11 @@ public Pair> claimQueue(ServerName sourceServerName, S // queue to tell the upper layer that claim nothing. For other types of exception should be // thrown out to notify the upper layer. LOG.info("Claim queue queueId={} from {} to {} failed with {}, someone else took the log?", - queueId,sourceServerName, destServerName, e.toString()); + queueId, sourceServerName, destServerName, e.toString()); return new Pair<>(newQueueId, Collections.emptySortedSet()); } catch (KeeperException | InterruptedException e) { - throw new ReplicationException("Claim queue queueId=" + queueId + " from " + - sourceServerName + " to " + destServerName + " failed", e); + throw new ReplicationException("Claim queue queueId=" + queueId + " from " + sourceServerName + + " to " + destServerName + " failed", e); } } @@ -478,8 +476,8 @@ public List getListOfReplicators() throws ReplicationException { private List getWALsInQueue0(ServerName serverName, String queueId) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, - queueId)); + List children = + ZKUtil.listChildrenNoWatch(zookeeper, getQueueNode(serverName, queueId)); return children != null ? children : Collections.emptyList(); } @@ -521,7 +519,7 @@ protected int getQueuesZNodeCversion() throws KeeperException { * Therefore, we must update the cversion of root {@link #queuesZNode} when migrate wal nodes to * other queues. * @see #claimQueue(ServerName, String, ServerName) as an example of updating root - * {@link #queuesZNode} cversion. + * {@link #queuesZNode} cversion. 
*/ @Override public Set getAllWALs() throws ReplicationException { @@ -543,8 +541,8 @@ public Set getAllWALs() throws ReplicationException { if (v0 == v1) { return wals; } - LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + LOG.info("Replication queue node cversion changed from %d to %d, retry = %d", v0, v1, + retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all wals", e); @@ -597,8 +595,8 @@ public void addHFileRefs(String peerId, List> pairs) List listOfOps = pairs.stream().map(p -> p.getSecond().getName()) .map(n -> getHFileNode(peerNode, n)) .map(f -> ZKUtilOp.createAndFailSilent(f, HConstants.EMPTY_BYTE_ARRAY)).collect(toList()); - LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + LOG.debug("The multi list size for adding hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -613,8 +611,8 @@ public void removeHFileRefs(String peerId, List files) throws Replicatio List listOfOps = files.stream().map(n -> getHFileNode(peerNode, n)) .map(ZKUtilOp::deleteNodeFailSilent).collect(toList()); - LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", - peerNode, listOfOps.size()); + LOG.debug("The multi list size for removing hfile references in zk for node {} is {}", peerNode, + listOfOps.size()); try { ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true); } catch (KeeperException e) { @@ -638,8 +636,8 @@ public List getAllPeersFromHFileRefsQueue() throws ReplicationException } private List getReplicableHFiles0(String peerId) throws KeeperException { - List children = ZKUtil.listChildrenNoWatch(this.zookeeper, - getHFileRefsPeerNode(peerId)); + List children = + ZKUtil.listChildrenNoWatch(this.zookeeper, getHFileRefsPeerNode(peerId)); return children != null ? children : Collections.emptyList(); } @@ -683,7 +681,7 @@ public Set getAllHFileRefs() throws ReplicationException { return hfileRefs; } LOG.debug("Replication hfile references node cversion changed from %d to %d, retry = %d", - v0, v1, retry); + v0, v1, retry); } } catch (KeeperException e) { throw new ReplicationException("Failed to get all hfile refs", e); diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java index d6e692aef381..c60a1c1f55c4 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ZKReplicationStorageBase.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
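getAllWALs() above (and getAllHFileRefs() further down) wrap the listing in an optimistic read: snapshot the queues znode cversion, read everything, re-read the cversion, and only accept the result if the two match, retrying otherwise because a concurrent claimQueue() may have moved nodes around. A JDK-only sketch of that check-read-recheck loop, with an in-memory stand-in for ZooKeeper:

    import java.util.Set;
    import java.util.TreeSet;
    import java.util.concurrent.ConcurrentSkipListSet;
    import java.util.concurrent.atomic.AtomicInteger;

    // Hypothetical in-memory "store": cversion plays the role of the ZK node cversion.
    public class CversionRetryDemo {
      static final AtomicInteger cversion = new AtomicInteger();
      static final Set<String> wals = new ConcurrentSkipListSet<>();

      static Set<String> getAllWals(int maxRetries) {
        for (int retry = 0; retry <= maxRetries; retry++) {
          int v0 = cversion.get();
          Set<String> snapshot = new TreeSet<>(wals); // read while nobody was (hopefully) writing
          int v1 = cversion.get();
          if (v0 == v1) {
            return snapshot; // nothing moved between the two version reads
          }
          System.out.printf("queue node cversion changed from %d to %d, retry = %d%n", v0, v1, retry);
        }
        throw new IllegalStateException("cversion kept changing");
      }

      public static void main(String[] args) {
        wals.add("wal.1");
        cversion.incrementAndGet();
        System.out.println(getAllWals(3));
      }
    }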
See the NOTICE file * distributed with this work for additional information @@ -19,7 +19,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.zookeeper.ZKWatcher; import org.apache.hadoop.hbase.zookeeper.ZNodePaths; diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java index cf8e97398f5d..54ca6b7a1720 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateBasic.java @@ -79,11 +79,11 @@ public void testReplicationQueueStorage() throws ReplicationException { */ rqs.addWAL(server1, "qId1", "trash"); rqs.removeWAL(server1, "qId1", "trash"); - rqs.addWAL(server1,"qId2", "filename1"); - rqs.addWAL(server1,"qId3", "filename2"); - rqs.addWAL(server1,"qId3", "filename3"); - rqs.addWAL(server2,"trash", "trash"); - rqs.removeQueue(server2,"trash"); + rqs.addWAL(server1, "qId2", "filename1"); + rqs.addWAL(server1, "qId3", "filename2"); + rqs.addWAL(server1, "qId3", "filename3"); + rqs.addWAL(server2, "trash", "trash"); + rqs.removeQueue(server2, "trash"); List reps = rqs.getListOfReplicators(); assertEquals(2, reps.size()); @@ -105,10 +105,11 @@ public void testReplicationQueueStorage() throws ReplicationException { } private void removeAllQueues(ServerName serverName) throws ReplicationException { - for (String queue: rqs.getAllQueues(serverName)) { + for (String queue : rqs.getAllQueues(serverName)) { rqs.removeQueue(serverName, queue); } } + @Test public void testReplicationQueues() throws ReplicationException { // Initialize ReplicationPeer so we can add peers (we don't transfer lone queues) @@ -166,7 +167,7 @@ public void testHfileRefsReplicationQueues() throws ReplicationException, Keeper assertTrue(rqs.getReplicableHFiles(ID_ONE).isEmpty()); assertEquals(0, rqs.getAllPeersFromHFileRefsQueue().size()); rp.getPeerStorage().addPeer(ID_ONE, - ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true); + ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true); rqs.addPeerToHFileRefs(ID_ONE); rqs.addHFileRefs(ID_ONE, files1); assertEquals(1, rqs.getAllPeersFromHFileRefsQueue().size()); @@ -363,9 +364,9 @@ protected void populateQueues() throws ReplicationException { rqs.addWAL(server3, "qId" + i, "filename" + j); } // Add peers for the corresponding queues so they are not orphans - rp.getPeerStorage().addPeer("qId" + i, - ReplicationPeerConfig.newBuilder(). 
- setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), true); + rp.getPeerStorage().addPeer("qId" + i, ReplicationPeerConfig.newBuilder() + .setClusterKey(MiniZooKeeperCluster.HOST + ":2818:/bogus" + i).build(), + true); } } } diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java index 9eb67f9037d8..ef05e64de049 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestReplicationStateZKImpl.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -69,8 +69,8 @@ private static String initPeerClusterState(String baseZKNode) Configuration testConf = new Configuration(conf); testConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, baseZKNode); ZKWatcher zkw1 = new ZKWatcher(testConf, "test1", null); - String fakeRs = ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, - "hostname1.example.org:1234"); + String fakeRs = + ZNodePaths.joinZNode(zkw1.getZNodePaths().rsZNode, "hostname1.example.org:1234"); ZKUtil.createWithParents(zkw1, fakeRs); ZKClusterId.setClusterId(zkw1, new ClusterId()); return ZKConfig.getZooKeeperClusterKey(testConf); diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java index 51a3408c1e39..ae8a776b68ed 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationPeerStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -128,8 +128,8 @@ private void assertMapEquals(Map> expected, } else { assertNotNull(actualCFs); assertEquals(expectedCFs.size(), actualCFs.size()); - for (Iterator expectedIt = expectedCFs.iterator(), actualIt = actualCFs.iterator(); - expectedIt.hasNext();) { + for (Iterator expectedIt = expectedCFs.iterator(), + actualIt = actualCFs.iterator(); expectedIt.hasNext();) { assertEquals(expectedIt.next(), actualIt.next()); } } @@ -205,31 +205,32 @@ public void testBaseReplicationPeerConfig() throws ReplicationException { Configuration conf = UTIL.getConfiguration(); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";"). - concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. 
- updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigSecondKey)); // validates base configs get updated values even if config already present conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, - customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";"). - concat(customPeerConfigSecondKey).concat("=").concat(customPeerConfigSecondUpdatedValue)); + customPeerConfigKey.concat("=").concat(customPeerConfigUpdatedValue).concat(";") + .concat(customPeerConfigSecondKey).concat("=") + .concat(customPeerConfigSecondUpdatedValue)); - ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigAfterValueUpdate = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); - assertEquals(customPeerConfigUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigKey)); - assertEquals(customPeerConfigSecondUpdatedValue, replicationPeerConfigAfterValueUpdate. - getConfiguration().get(customPeerConfigSecondKey)); + assertEquals(customPeerConfigUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigKey)); + assertEquals(customPeerConfigSecondUpdatedValue, + replicationPeerConfigAfterValueUpdate.getConfiguration().get(customPeerConfigSecondKey)); } @Test @@ -245,26 +246,26 @@ public void testBaseReplicationRemovePeerConfig() throws ReplicationException { conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat(customPeerConfigValue)); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); // validates base configs are present in replicationPeerConfig - assertEquals(customPeerConfigValue, updatedReplicationPeerConfig.getConfiguration(). - get(customPeerConfigKey)); + assertEquals(customPeerConfigValue, + updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); conf.unset(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG); conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil. 
- updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); + ReplicationPeerConfig replicationPeerConfigRemoved = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, updatedReplicationPeerConfig); assertNull(replicationPeerConfigRemoved.getConfiguration().get(customPeerConfigKey)); } @Test public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() - throws ReplicationException { + throws ReplicationException { String customPeerConfigKey = "hbase.xxx.custom_config"; ReplicationPeerConfig existingReplicationPeerConfig = getConfig(1); @@ -274,15 +275,16 @@ public void testBaseReplicationRemovePeerConfigWithNoExistingConfig() conf.set(ReplicationPeerConfigUtil.HBASE_REPLICATION_PEER_BASE_CONFIG, customPeerConfigKey.concat("=").concat("")); - ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil. - updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); + ReplicationPeerConfig updatedReplicationPeerConfig = ReplicationPeerConfigUtil + .updateReplicationBasePeerConfigs(conf, existingReplicationPeerConfig); assertNull(updatedReplicationPeerConfig.getConfiguration().get(customPeerConfigKey)); } @Test public void testPeerNameControl() throws Exception { String clusterKey = "key"; - STORAGE.addPeer("6", ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey).build(), true); + STORAGE.addPeer("6", ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey).build(), + true); try { STORAGE.addPeer("6", ReplicationPeerConfig.newBuilder().setClusterKey(clusterKey).build(), diff --git a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java index f56e8ce5063b..4428a0c0cb7b 100644 --- a/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java +++ b/hbase-replication/src/test/java/org/apache/hadoop/hbase/replication/TestZKReplicationQueueStorage.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
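The base-peer-config tests reflowed above build HBASE_REPLICATION_PEER_BASE_CONFIG as a "key=value;key2=value2" string and expect an empty value ("key=") to clear a previously applied entry. A JDK-only sketch of applying such a string onto a plain map; the parsing here illustrates the expected behaviour rather than the actual ReplicationPeerConfigUtil code, and the second key name is made up:

    import java.util.LinkedHashMap;
    import java.util.Map;

    // Applies a "key=value;key2=value2" base-config string onto an existing map:
    // non-empty values overwrite, empty values ("key=") remove.
    public class BasePeerConfigFormatDemo {
      static void apply(Map<String, String> peerConfig, String baseConfig) {
        for (String entry : baseConfig.split(";")) {
          int eq = entry.indexOf('=');
          if (eq < 0) continue;
          String key = entry.substring(0, eq);
          String value = entry.substring(eq + 1);
          if (value.isEmpty()) {
            peerConfig.remove(key); // "key=" clears a previously applied base config value
          } else {
            peerConfig.put(key, value); // base configs win even if the key is already present
          }
        }
      }

      public static void main(String[] args) {
        Map<String, String> peerConfig = new LinkedHashMap<>();
        apply(peerConfig, "hbase.xxx.custom_config=test;hbase.xxx.second_config=testSecond");
        apply(peerConfig, "hbase.xxx.custom_config=updated");
        apply(peerConfig, "hbase.xxx.second_config=");
        System.out.println(peerConfig); // {hbase.xxx.custom_config=updated}
      }
    }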
See the NOTICE file * distributed with this work for additional information diff --git a/hbase-resource-bundle/pom.xml b/hbase-resource-bundle/pom.xml index 6a380cf93cbe..5c96f073a52b 100644 --- a/hbase-resource-bundle/pom.xml +++ b/hbase-resource-bundle/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration @@ -36,15 +36,15 @@ true - + - - maven-assembly-plugin - - true - + + maven-assembly-plugin + + true + org.apache.maven.plugins diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml index 23530f2fa4d5..c9b3f562daf4 100644 --- a/hbase-rest/pom.xml +++ b/hbase-rest/pom.xml @@ -1,4 +1,4 @@ - + 4.0.0 - hbase-build-configuration org.apache.hbase + hbase-build-configuration 2.5.0-SNAPSHOT ../hbase-build-configuration hbase-rest Apache HBase - Rest HBase Rest Server - - - - - - ${project.build.directory} - - hbase-webapps/** - - - - - - src/test/resources - - **/** - - - - - - - maven-assembly-plugin - - true - - - - - org.apache.maven.plugins - maven-source-plugin - - - - maven-antrun-plugin - - - - generate - generate-sources - - - - - - - - - - - - - - - - - - - - - run - - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - - jspcSource-packageInfo-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - - maven-surefire-plugin - - - target/test-classes/webapps - - - - - org.xolstice.maven.plugins - protobuf-maven-plugin - - - compile-protoc - generate-sources - - compile - - - - - - net.revelc.code - warbucks-maven-plugin - - - - com.sun.jersey - jersey-core + com.sun.jersey + jersey-core @@ -300,12 +187,12 @@ --> org.codehaus.jettison jettison - - - stax - stax-api - - + + + stax + stax-api + + @@ -392,6 +279,119 @@ test + + + + + + ${project.build.directory} + + hbase-webapps/** + + + + + + src/test/resources + + **/** + + + + + + + maven-assembly-plugin + + true + + + + + org.apache.maven.plugins + maven-source-plugin + + + + maven-antrun-plugin + + + + generate + + run + + generate-sources + + + + + + + + + + + + + + + + + + + + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + + jspcSource-packageInfo-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + + maven-surefire-plugin + + + target/test-classes/webapps + + + + + org.xolstice.maven.plugins + protobuf-maven-plugin + + + compile-protoc + + compile + + generate-sources + + + + + net.revelc.code + warbucks-maven-plugin + + + @@ -508,8 +508,8 @@ test - com.google.guava - guava + com.google.guava + guava diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java index 56bc9297f85f..af8b9e303bdf 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/Constants.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import org.apache.yetus.audience.InterfaceAudience; @@ -29,7 +28,7 @@ public interface Constants { String VERSION_STRING = "0.0.3"; - int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours + int DEFAULT_MAX_AGE = 60 * 60 * 4; // 4 hours int DEFAULT_LISTEN_PORT = 8080; @@ -83,11 +82,13 @@ public interface Constants { String SCAN_FILTER = "filter"; String SCAN_REVERSED = "reversed"; String SCAN_CACHE_BLOCKS = "cacheblocks"; - String CUSTOM_FILTERS = "hbase.rest.custom.filters"; + String CUSTOM_FILTERS = "hbase.rest.custom.filters"; String ROW_KEYS_PARAM_NAME = "row"; - /** If this query parameter is present when processing row or scanner resources, - it disables server side block caching */ + /** + * If this query parameter is present when processing row or scanner resources, it disables server + * side block caching + */ String NOCACHE_PARAM_NAME = "nocache"; /** Configuration parameter to set rest client connection timeout */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java index 0a6fd0e1d5ac..d49e0769d843 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ExistsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -53,19 +51,17 @@ public ExistsResource(TableResource tableResource) throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF, MIMETYPE_BINARY}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF, + MIMETYPE_BINARY }) public Response get(final @Context UriInfo uriInfo) { try { if (!tableResource.exists()) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } ResponseBuilder response = Response.ok(); response.cacheControl(cacheControl); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java index f1b2cea6e952..ef42e9344fa6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MetricsREST.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -8,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -16,13 +15,11 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CompatibilitySingletonFactory; - import org.apache.hadoop.hbase.rest.MetricsRESTSource; +import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public class MetricsREST { @@ -34,23 +31,23 @@ public MetricsRESTSource getSource() { private MetricsRESTSource source; public MetricsREST() { - source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); + source = CompatibilitySingletonFactory.getInstance(MetricsRESTSource.class); } - + /** * @param inc How much to add to requests. */ public void incrementRequests(final int inc) { source.incrementRequests(inc); } - + /** * @param inc How much to add to sucessfulGetCount. */ public void incrementSucessfulGetRequests(final int inc) { source.incrementSucessfulGetRequests(inc); } - + /** * @param inc How much to add to sucessfulPutCount. */ @@ -64,7 +61,7 @@ public void incrementSucessfulPutRequests(final int inc) { public void incrementFailedPutRequests(final int inc) { source.incrementFailedPutRequests(inc); } - + /** * @param inc How much to add to failedGetCount. */ diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java index 2d097752bd9b..ee221a907562 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/MultiRowResource.java @@ -1,5 +1,4 @@ -/** - * +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -46,7 +45,6 @@ public class MultiRowResource extends ResourceBase implements Constants { /** * Constructor - * * @param tableResource * @param versions * @throws java.io.IOException @@ -87,15 +85,14 @@ public Response get(final @Context UriInfo uriInfo) { } } - ResultGenerator generator = - ResultGenerator.fromRowSpec(this.tableResource.getName(), rowSpec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(this.tableResource.getName(), + rowSpec, null, !params.containsKey(NOCACHE_PARAM_NAME)); Cell value = null; RowModel rowModel = new RowModel(rowSpec.getRow()); if (generator.hasNext()) { while ((value = generator.next()) != null) { - rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil - .cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), + CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } model.addRow(rowModel); } else { @@ -106,11 +103,10 @@ public Response get(final @Context UriInfo uriInfo) { } if (model.getRows().isEmpty()) { - //If no rows found. + // If no rows found. servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("No rows found." 
+ CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("No rows found." + CRLF).build(); } else { servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(model).build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java index e1623af96a16..68b63d756a49 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -83,30 +82,29 @@ public NamespacesInstanceResource(String namespace, boolean queryTables) throws * @param context servlet context * @param uriInfo (JAX-RS context variable) request URL * @return A response containing NamespacesInstanceModel for a namespace descriptions and - * TableListModel for a list of namespace tables. + * TableListModel for a list of namespace tables. */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); // Respond to list of namespace tables requests. - if(queryTables){ + if (queryTables) { TableListModel tableModel = new TableListModel(); - try{ + try { HTableDescriptor[] tables = servlet.getAdmin().listTableDescriptorsByNamespace(namespace); - for(int i = 0; i < tables.length; i++){ + for (int i = 0; i < tables.length; i++) { tableModel.add(new TableModel(tables[i].getTableName().getQualifierAsString())); } servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(tableModel).build(); - }catch(IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); throw new RuntimeException("Cannot retrieve table list for '" + namespace + "'."); } @@ -114,8 +112,7 @@ public Response get(final @Context ServletContext context, // Respond to namespace description requests. try { - NamespacesInstanceModel rowModel = - new NamespacesInstanceModel(servlet.getAdmin(), namespace); + NamespacesInstanceModel rowModel = new NamespacesInstanceModel(servlet.getAdmin(), namespace); servlet.getMetrics().incrementSucessfulGetRequests(1); return Response.ok(rowModel).build(); } catch (IOException e) { @@ -131,8 +128,7 @@ public Response get(final @Context ServletContext context, * @return response code. */ @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response put(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, true, uriInfo); } @@ -144,14 +140,11 @@ public Response put(final NamespacesInstanceModel model, final @Context UriInfo * @return response code. 
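
// Illustrative client sketch (outside the patch above): the @Produces lists on these namespace
// handlers mean a caller picks the representation via the HTTP Accept header. Host, port and the
// "default" namespace below are placeholder assumptions; "application/json" is one of the
// MIMETYPE constants the GET handler shown here accepts.
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class NamespaceGetExample {
  public static void main(String[] args) throws Exception {
    // Assumed REST server location; the path mirrors how NamespacesInstanceResource is exposed.
    URL url = new URL("http://localhost:8080/namespaces/default");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (InputStream in = conn.getInputStream()) {
      in.transferTo(System.out); // prints the NamespacesInstanceModel rendered as JSON
    } finally {
      conn.disconnect();
    }
  }
}
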
*/ @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final NamespacesInstanceModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final NamespacesInstanceModel model, final @Context UriInfo uriInfo) { return processUpdate(model, false, uriInfo); } - // Check that POST or PUT is valid and then update namespace. private Response processUpdate(NamespacesInstanceModel model, final boolean updateExisting, final UriInfo uriInfo) { @@ -161,7 +154,7 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda if (model == null) { try { model = new NamespacesInstanceModel(namespace); - } catch(IOException ioe) { + } catch (IOException ioe) { servlet.getMetrics().incrementFailedPutRequests(1); throw new RuntimeException("Cannot retrieve info for '" + namespace + "'."); } @@ -179,25 +172,25 @@ private Response processUpdate(NamespacesInstanceModel model, final boolean upda try { admin = servlet.getAdmin(); namespaceExists = doesNamespaceExist(admin, namespace); - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } // Do not allow creation if namespace already exists. - if(!updateExisting && namespaceExists){ + if (!updateExisting && namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' already exists. Use REST PUT " + - "to alter the existing namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity("Namespace '" + + namespace + "' already exists. Use REST PUT " + "to alter the existing namespace.") + .build(); } // Do not allow altering if namespace does not exist. - if (updateExisting && !namespaceExists){ + if (updateExisting && !namespaceExists) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exist. Use " + - "REST POST to create the namespace.").build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT).entity( + "Namespace '" + namespace + "' does not exist. Use " + "REST POST to create the namespace.") + .build(); } return createOrUpdate(model, uriInfo, admin, updateExisting); @@ -208,32 +201,32 @@ private Response createOrUpdate(final NamespacesInstanceModel model, final UriIn final Admin admin, final boolean updateExisting) { NamespaceDescriptor.Builder builder = NamespaceDescriptor.create(namespace); builder.addConfiguration(model.getProperties()); - if(model.getProperties().size() > 0){ + if (model.getProperties().size() > 0) { builder.addConfiguration(model.getProperties()); } NamespaceDescriptor nsd = builder.build(); - try{ - if(updateExisting){ + try { + if (updateExisting) { admin.modifyNamespace(nsd); - }else{ + } else { admin.createNamespace(nsd); } - }catch (IOException e) { + } catch (IOException e) { servlet.getMetrics().incrementFailedPutRequests(1); return processException(e); } servlet.getMetrics().incrementSucessfulPutRequests(1); - return updateExisting ? Response.ok(uriInfo.getAbsolutePath()).build() : - Response.created(uriInfo.getAbsolutePath()).build(); + return updateExisting ? 
Response.ok(uriInfo.getAbsolutePath()).build() + : Response.created(uriInfo.getAbsolutePath()).build(); } - private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException{ + private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOException { NamespaceDescriptor[] nd = admin.listNamespaceDescriptors(); - for(int i = 0; i < nd.length; i++){ - if(nd[i].getName().equals(namespaceName)){ + for (int i = 0; i < nd.length; i++) { + if (nd[i].getName().equals(namespaceName)) { return true; } } @@ -247,8 +240,8 @@ private boolean doesNamespaceExist(Admin admin, String namespaceName) throws IOE * @return response code. */ @DELETE - public Response deleteNoBody(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response deleteNoBody(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { LOG.trace("DELETE " + uriInfo.getAbsolutePath()); } @@ -258,12 +251,12 @@ public Response deleteNoBody(final byte[] message, .entity("Forbidden" + CRLF).build(); } - try{ + try { Admin admin = servlet.getAdmin(); - if (!doesNamespaceExist(admin, namespace)){ - return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT). - entity("Namespace '" + namespace + "' does not exists. Cannot " + - "drop namespace.").build(); + if (!doesNamespaceExist(admin, namespace)) { + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Namespace '" + namespace + "' does not exists. Cannot " + "drop namespace.") + .build(); } admin.deleteNamespace(namespace); @@ -280,8 +273,8 @@ public Response deleteNoBody(final byte[] message, * Dispatch to NamespaceInstanceResource for getting list of tables. */ @Path("tables") - public NamespacesInstanceResource getNamespaceInstanceResource( - final @PathParam("tables") String namespace) throws IOException { + public NamespacesInstanceResource + getNamespaceInstanceResource(final @PathParam("tables") String namespace) throws IOException { return new NamespacesInstanceResource(this.namespace, true); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java index e458d463f672..c83d41cec5a2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/NamespacesResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
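
// A minimal standalone sketch (not part of the patch) of the Admin calls that the
// createOrUpdate and delete handlers above delegate to. The namespace name and the quota
// property value are hypothetical; the API calls themselves are the ones used in this class.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceAdminExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Admin admin = connection.getAdmin()) {
      NamespaceDescriptor nsd = NamespaceDescriptor.create("demo_ns") // hypothetical namespace
          .addConfiguration("hbase.namespace.quota.maxtables", "10")  // illustrative property
          .build();
      admin.createNamespace(nsd);       // what the REST POST path ends up doing
      admin.modifyNamespace(nsd);       // what the REST PUT path ends up doing
      admin.deleteNamespace("demo_ns"); // what the REST DELETE path ends up doing
    }
  }
}
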
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -60,8 +58,8 @@ public NamespacesResource() throws IOException { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java index d5e4354e4391..57150d42b2ad 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufMessageHandler.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.yetus.audience.InterfaceAudience; /** - * Common interface for models capable of supporting protobuf marshalling - * and unmarshalling. Hooks up to the ProtobufMessageBodyConsumer and - * ProtobufMessageBodyProducer adapters. + * Common interface for models capable of supporting protobuf marshalling and unmarshalling. Hooks + * up to the ProtobufMessageBodyConsumer and ProtobufMessageBodyProducer adapters. */ @InterfaceAudience.Private public interface ProtobufMessageHandler { @@ -41,6 +37,5 @@ public interface ProtobufMessageHandler { * @return reference to self for convenience * @throws IOException */ - ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException; + ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java index d1ba5b7dd827..3ce1ff5c7b29 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ProtobufStreamingOutput.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -50,15 +50,15 @@ protected ProtobufStreamingOutput(ResultScanner scanner, String type, int limit, this.limit = limit; this.fetchSize = fetchSize; if (LOG.isTraceEnabled()) { - LOG.trace("Created StreamingOutput with content type = " + this.contentType - + " user limit : " + this.limit + " scan fetch size : " + this.fetchSize); + LOG.trace("Created StreamingOutput with content type = " + this.contentType + " user limit : " + + this.limit + " scan fetch size : " + this.fetchSize); } } @Override public void write(OutputStream outStream) throws IOException, WebApplicationException { Result[] rowsToSend; - if(limit < fetchSize){ + if (limit < fetchSize) { rowsToSend = this.resultScanner.next(limit); writeToStream(createModelFromResults(rowsToSend), this.contentType, outStream); } else { @@ -69,7 +69,7 @@ public void write(OutputStream outStream) throws IOException, WebApplicationExce } else { rowsToSend = this.resultScanner.next(this.fetchSize); } - if(rowsToSend.length == 0){ + if (rowsToSend.length == 0) { break; } count = count - rowsToSend.length; @@ -81,7 +81,7 @@ public void write(OutputStream outStream) throws IOException, WebApplicationExce private void writeToStream(CellSetModel model, String contentType, OutputStream outStream) throws IOException { byte[] objectBytes = model.createProtobufOutput(); - outStream.write(Bytes.toBytes((short)objectBytes.length)); + outStream.write(Bytes.toBytes((short) objectBytes.length)); outStream.write(objectBytes); outStream.flush(); if (LOG.isTraceEnabled()) { @@ -96,8 +96,8 @@ private CellSetModel createModelFromResults(Result[] results) { RowModel rModel = new RowModel(rowKey); List kvs = rs.listCells(); for (Cell kv : kvs) { - rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), kv - .getTimestamp(), CellUtil.cloneValue(kv))); + rModel.addCell(new CellModel(CellUtil.cloneFamily(kv), CellUtil.cloneQualifier(kv), + kv.getTimestamp(), CellUtil.cloneValue(kv))); } cellSetModel.addRow(rModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java index 954503242daf..1bdd414ed493 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
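
// ProtobufStreamingOutput above writes each batch as a 2-byte big-endian length prefix
// (Bytes.toBytes((short) objectBytes.length)) followed by the CellSetModel protobuf bytes.
// A minimal sketch of the matching client-side read loop, assuming the InputStream comes from
// a scanner request served by this class; the stream source and error handling are simplified.
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.InputStream;
import org.apache.hadoop.hbase.rest.model.CellSetModel;

public class StreamingScanReader {
  public static void readAll(InputStream in) throws Exception {
    DataInputStream data = new DataInputStream(in);
    while (true) {
      int len;
      try {
        len = data.readShort(); // matches the short length prefix written by writeToStream
      } catch (EOFException eof) {
        break;                  // no more cell set batches on the stream
      }
      byte[] buf = new byte[len];
      data.readFully(buf);
      CellSetModel model = new CellSetModel();
      model.getObjectFromMessage(buf); // ProtobufMessageHandler unmarshalling
      System.out.println("batch rows: " + model.getRows().size());
    }
  }
}
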
*/ - package org.apache.hadoop.hbase.rest; import java.lang.management.ManagementFactory; @@ -88,14 +87,14 @@ public class RESTServer implements Constants { static final String REST_CSRF_ENABLED_KEY = "hbase.rest.csrf.enabled"; static final boolean REST_CSRF_ENABLED_DEFAULT = false; boolean restCSRFEnabled = false; - static final String REST_CSRF_CUSTOM_HEADER_KEY ="hbase.rest.csrf.custom.header"; + static final String REST_CSRF_CUSTOM_HEADER_KEY = "hbase.rest.csrf.custom.header"; static final String REST_CSRF_CUSTOM_HEADER_DEFAULT = "X-XSRF-HEADER"; static final String REST_CSRF_METHODS_TO_IGNORE_KEY = "hbase.rest.csrf.methods.to.ignore"; static final String REST_CSRF_METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; public static final String SKIP_LOGIN_KEY = "hbase.rest.skip.login"; static final int DEFAULT_HTTP_MAX_HEADER_SIZE = 64 * 1024; // 64k static final String HTTP_HEADER_CACHE_SIZE = "hbase.rest.http.header.cache.size"; - static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE -1; + static final int DEFAULT_HTTP_HEADER_CACHE_SIZE = Character.MAX_VALUE - 1; private static final String PATH_SPEC_ANY = "/*"; @@ -103,12 +102,12 @@ public class RESTServer implements Constants { // HTTP OPTIONS method is commonly used in REST APIs for negotiation. So it is enabled by default. private static boolean REST_HTTP_ALLOW_OPTIONS_METHOD_DEFAULT = true; static final String REST_CSRF_BROWSER_USERAGENTS_REGEX_KEY = - "hbase.rest-csrf.browser-useragents-regex"; + "hbase.rest-csrf.browser-useragents-regex"; // HACK, making this static for AuthFilter to get at our configuration. Necessary for unit tests. @edu.umd.cs.findbugs.annotations.SuppressWarnings( - value={"ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL"}, - justification="For testing") + value = { "ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD", "MS_CANNOT_BE_FINAL" }, + justification = "For testing") public static Configuration conf = null; private final UserProvider userProvider; private Server server; @@ -122,16 +121,17 @@ public RESTServer(Configuration conf) { private static void printUsageAndExit(Options options, int exitCode) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("hbase rest start", "", options, - "\nTo run the REST server as a daemon, execute " + - "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", true); + "\nTo run the REST server as a daemon, execute " + + "hbase-daemon.sh start|stop rest [-i ] [-p ] [-ro]\n", + true); System.exit(exitCode); } void addCSRFFilter(ServletContextHandler ctxHandler, Configuration conf) { restCSRFEnabled = conf.getBoolean(REST_CSRF_ENABLED_KEY, REST_CSRF_ENABLED_DEFAULT); if (restCSRFEnabled) { - Map restCsrfParams = RestCsrfPreventionFilter - .getFilterParams(conf, "hbase.rest-csrf."); + Map restCsrfParams = + RestCsrfPreventionFilter.getFilterParams(conf, "hbase.rest-csrf."); FilterHolder holder = new FilterHolder(); holder.setName("csrf"); holder.setClassName(RestCsrfPreventionFilter.class.getName()); @@ -149,8 +149,8 @@ private void addClickjackingPreventionFilter(ServletContextHandler ctxHandler, ctxHandler.addFilter(holder, PATH_SPEC_ANY, EnumSet.allOf(DispatcherType.class)); } - private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, - Configuration conf, boolean isSecure) { + private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, Configuration conf, + boolean isSecure) { FilterHolder holder = new FilterHolder(); holder.setName("securityheaders"); 
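
// A small configuration sketch (outside the patch): the CSRF filter wired up in addCSRFFilter
// above is driven by the keys declared at the top of RESTServer. The values shown are the
// defaults from this class; in practice these would normally be set in hbase-site.xml rather
// than built programmatically.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RestCsrfConfigExample {
  public static Configuration csrfEnabledConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.rest.csrf.enabled", true);           // REST_CSRF_ENABLED_KEY
    conf.set("hbase.rest.csrf.custom.header", "X-XSRF-HEADER"); // REST_CSRF_CUSTOM_HEADER_KEY
    conf.set("hbase.rest.csrf.methods.to.ignore", "GET,OPTIONS,HEAD,TRACE");
    return conf;
  }
}
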
holder.setClassName(SecurityHeadersFilter.class.getName()); @@ -159,13 +159,12 @@ private void addSecurityHeadersFilter(ServletContextHandler ctxHandler, } // login the server principal (if using secure Hadoop) - private static Pair> loginServerPrincipal( - UserProvider userProvider, Configuration conf) throws Exception { + private static Pair> + loginServerPrincipal(UserProvider userProvider, Configuration conf) throws Exception { Class containerClass = ServletContainer.class; if (userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled()) { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); String keytabFilename = conf.get(REST_KEYTAB_FILE); Preconditions.checkArgument(keytabFilename != null && !keytabFilename.isEmpty(), REST_KEYTAB_FILE + " should be set if security is enabled"); @@ -181,7 +180,7 @@ private static Pair> loginServer FilterHolder authFilter = new FilterHolder(); authFilter.setClassName(AuthFilter.class.getName()); authFilter.setName("AuthenticationFilter"); - return new Pair<>(authFilter,containerClass); + return new Pair<>(authFilter, containerClass); } } return new Pair<>(null, containerClass); @@ -190,8 +189,8 @@ private static Pair> loginServer private static void parseCommandLine(String[] args, Configuration conf) { Options options = new Options(); options.addOption("p", "port", true, "Port to bind to [default: " + DEFAULT_LISTEN_PORT + "]"); - options.addOption("ro", "readonly", false, "Respond only to GET HTTP " + - "method requests [default: false]"); + options.addOption("ro", "readonly", false, + "Respond only to GET HTTP " + "method requests [default: false]"); options.addOption("i", "infoport", true, "Port for WEB UI"); CommandLine commandLine = null; @@ -250,23 +249,22 @@ private static void parseCommandLine(String[] args, Configuration conf) { } } - /** * Runs the REST server. */ public synchronized void run() throws Exception { - Pair> pair = loginServerPrincipal( - userProvider, conf); + Pair> pair = + loginServerPrincipal(userProvider, conf); FilterHolder authFilter = pair.getFirst(); Class containerClass = pair.getSecond(); RESTServlet servlet = RESTServlet.getInstance(conf, userProvider); - // Set up the Jersey servlet container for Jetty // The Jackson1Feature is a signal to Jersey that it should use jackson doing json. - // See here: https://stackoverflow.com/questions/39458230/how-register-jacksonfeature-on-clientconfig - ResourceConfig application = new ResourceConfig(). - packages("org.apache.hadoop.hbase.rest").register(JacksonJaxbJsonProvider.class); + // See here: + // https://stackoverflow.com/questions/39458230/how-register-jacksonfeature-on-clientconfig + ResourceConfig application = new ResourceConfig().packages("org.apache.hadoop.hbase.rest") + .register(JacksonJaxbJsonProvider.class); // Using our custom ServletContainer is tremendously important. This is what makes sure the // UGI.doAs() is done for the remoteUser, and calls are not made as the REST server itself. 
ServletContainer servletContainer = ReflectionUtils.newInstance(containerClass, application); @@ -282,23 +280,24 @@ public synchronized void run() throws Exception { // Use the default queue (unbounded with Jetty 9.3) if the queue size is negative, otherwise use // bounded {@link ArrayBlockingQueue} with the given size int queueSize = servlet.getConfiguration().getInt(REST_THREAD_POOL_TASK_QUEUE_SIZE, -1); - int idleTimeout = servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); - QueuedThreadPool threadPool = queueSize > 0 ? - new QueuedThreadPool(maxThreads, minThreads, idleTimeout, new ArrayBlockingQueue<>(queueSize)) : - new QueuedThreadPool(maxThreads, minThreads, idleTimeout); + int idleTimeout = + servlet.getConfiguration().getInt(REST_THREAD_POOL_THREAD_IDLE_TIMEOUT, 60000); + QueuedThreadPool threadPool = queueSize > 0 + ? new QueuedThreadPool(maxThreads, minThreads, idleTimeout, + new ArrayBlockingQueue<>(queueSize)) + : new QueuedThreadPool(maxThreads, minThreads, idleTimeout); this.server = new Server(threadPool); // Setup JMX - MBeanContainer mbContainer=new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); + MBeanContainer mbContainer = new MBeanContainer(ManagementFactory.getPlatformMBeanServer()); server.addEventListener(mbContainer); server.addBean(mbContainer); - String host = servlet.getConfiguration().get("hbase.rest.host", "0.0.0.0"); int servicePort = servlet.getConfiguration().getInt("hbase.rest.port", 8080); - int httpHeaderCacheSize = servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, - DEFAULT_HTTP_HEADER_CACHE_SIZE); + int httpHeaderCacheSize = + servlet.getConfiguration().getInt(HTTP_HEADER_CACHE_SIZE, DEFAULT_HTTP_HEADER_CACHE_SIZE); HttpConfiguration httpConfig = new HttpConfiguration(); httpConfig.setSecureScheme("https"); httpConfig.setSecurePort(servicePort); @@ -318,49 +317,48 @@ public synchronized void run() throws Exception { SslContextFactory sslCtxFactory = new SslContextFactory(); String keystore = conf.get(REST_SSL_KEYSTORE_STORE); String keystoreType = conf.get(REST_SSL_KEYSTORE_TYPE); - String password = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_PASSWORD, null); - String keyPassword = HBaseConfiguration.getPassword(conf, - REST_SSL_KEYSTORE_KEYPASSWORD, password); + String password = HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_PASSWORD, null); + String keyPassword = + HBaseConfiguration.getPassword(conf, REST_SSL_KEYSTORE_KEYPASSWORD, password); sslCtxFactory.setKeyStorePath(keystore); - if(StringUtils.isNotBlank(keystoreType)) { + if (StringUtils.isNotBlank(keystoreType)) { sslCtxFactory.setKeyStoreType(keystoreType); } sslCtxFactory.setKeyStorePassword(password); sslCtxFactory.setKeyManagerPassword(keyPassword); String trustStore = conf.get(REST_SSL_TRUSTSTORE_STORE); - if(StringUtils.isNotBlank(trustStore)) { + if (StringUtils.isNotBlank(trustStore)) { sslCtxFactory.setTrustStorePath(trustStore); } String trustStorePassword = - HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); - if(StringUtils.isNotBlank(trustStorePassword)) { + HBaseConfiguration.getPassword(conf, REST_SSL_TRUSTSTORE_PASSWORD, null); + if (StringUtils.isNotBlank(trustStorePassword)) { sslCtxFactory.setTrustStorePassword(trustStorePassword); } String trustStoreType = conf.get(REST_SSL_TRUSTSTORE_TYPE); - if(StringUtils.isNotBlank(trustStoreType)) { + if (StringUtils.isNotBlank(trustStoreType)) { sslCtxFactory.setTrustStoreType(trustStoreType); } - String[] excludeCiphers = 
servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_EXCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (excludeCiphers.length != 0) { sslCtxFactory.setExcludeCipherSuites(excludeCiphers); } - String[] includeCiphers = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeCiphers = servlet.getConfiguration() + .getStrings(REST_SSL_INCLUDE_CIPHER_SUITES, ArrayUtils.EMPTY_STRING_ARRAY); if (includeCiphers.length != 0) { sslCtxFactory.setIncludeCipherSuites(includeCiphers); } - String[] excludeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_EXCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] excludeProtocols = servlet.getConfiguration().getStrings(REST_SSL_EXCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (excludeProtocols.length != 0) { sslCtxFactory.setExcludeProtocols(excludeProtocols); } - String[] includeProtocols = servlet.getConfiguration().getStrings( - REST_SSL_INCLUDE_PROTOCOLS, ArrayUtils.EMPTY_STRING_ARRAY); + String[] includeProtocols = servlet.getConfiguration().getStrings(REST_SSL_INCLUDE_PROTOCOLS, + ArrayUtils.EMPTY_STRING_ARRAY); if (includeProtocols.length != 0) { sslCtxFactory.setIncludeProtocols(includeProtocols); } @@ -384,15 +382,16 @@ public synchronized void run() throws Exception { server.setStopAtShutdown(true); // set up context - ServletContextHandler ctxHandler = new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); + ServletContextHandler ctxHandler = + new ServletContextHandler(server, "/", ServletContextHandler.SESSIONS); ctxHandler.addServlet(sh, PATH_SPEC_ANY); if (authFilter != null) { ctxHandler.addFilter(authFilter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); } // Load filters from configuration. - String[] filterClasses = servlet.getConfiguration().getStrings(FILTER_CLASSES, - GzipFilter.class.getName()); + String[] filterClasses = + servlet.getConfiguration().getStrings(FILTER_CLASSES, GzipFilter.class.getName()); for (String filter : filterClasses) { filter = filter.trim(); ctxHandler.addFilter(filter, PATH_SPEC_ANY, EnumSet.of(DispatcherType.REQUEST)); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java index 6c71bb6222e0..c58255c8bac6 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServlet.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,15 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; - import org.apache.hadoop.conf.Configuration; -import org.apache.yetus.audience.InterfaceAudience; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.ParseFilter; @@ -32,6 +27,9 @@ import org.apache.hadoop.hbase.util.JvmPauseMonitor; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.ProxyUsers; +import org.apache.yetus.audience.InterfaceAudience; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Singleton class encapsulating global REST servlet state and functions. @@ -58,7 +56,7 @@ UserGroupInformation getRealUser() { * @return the RESTServlet singleton instance */ public synchronized static RESTServlet getInstance() { - assert(INSTANCE != null); + assert (INSTANCE != null); return INSTANCE; } @@ -75,8 +73,8 @@ public ConnectionCache getConnectionCache() { * @return the RESTServlet singleton instance * @throws IOException */ - public synchronized static RESTServlet getInstance(Configuration conf, - UserProvider userProvider) throws IOException { + public synchronized static RESTServlet getInstance(Configuration conf, UserProvider userProvider) + throws IOException { if (INSTANCE == null) { INSTANCE = new RESTServlet(conf, userProvider); } @@ -96,16 +94,14 @@ public synchronized static void stop() { * @param userProvider the login user provider * @throws IOException */ - RESTServlet(final Configuration conf, - final UserProvider userProvider) throws IOException { + RESTServlet(final Configuration conf, final UserProvider userProvider) throws IOException { this.realUser = userProvider.getCurrent().getUGI(); this.conf = conf; registerCustomFilter(conf); int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000); int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000); - connectionCache = new ConnectionCache( - conf, userProvider, cleanInterval, maxIdleTime); + connectionCache = new ConnectionCache(conf, userProvider, cleanInterval, maxIdleTime); if (supportsProxyuser()) { ProxyUsers.refreshSuperUserGroupsConfiguration(conf); } @@ -136,8 +132,7 @@ MetricsREST getMetrics() { } /** - * Helper method to determine if server should - * only respond to GET HTTP method requests. + * Helper method to determine if server should only respond to GET HTTP method requests. * @return boolean for server read-only state */ boolean isReadOnly() { @@ -166,8 +161,7 @@ private void registerCustomFilter(Configuration conf) { for (String filterClass : filterList) { String[] filterPart = filterClass.split(":"); if (filterPart.length != 2) { - LOG.warn( - "Invalid filter specification " + filterClass + " - skipping"); + LOG.warn("Invalid filter specification " + filterClass + " - skipping"); } else { ParseFilter.registerFilter(filterPart[0], filterPart[1]); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java index 28cf4cba9fa7..a8b7b78aff77 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RESTServletContainer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
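
// registerCustomFilter above splits each entry of hbase.rest.custom.filters (CUSTOM_FILTERS in
// Constants) on ':' into a filter name and a class name and passes the pair to
// ParseFilter.registerFilter. A sketch of supplying such an entry; the filter name and class
// are hypothetical, and the "Name:Class" value format is inferred from the split(":") above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CustomRestFilterConfig {
  public static Configuration withCustomFilter() {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.rest.custom.filters", "MyPrefixFilter:com.example.rest.MyPrefixFilter");
    return conf;
  }
}
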
See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; +import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; + import java.io.IOException; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; @@ -31,11 +31,10 @@ import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig; import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer; -import static org.apache.hadoop.hbase.http.ProxyUserAuthenticationFilter.toLowerCase; /** - * REST servlet container. It is used to get the remote request user - * without going through @HttpContext, so that we can minimize code changes. + * REST servlet container. It is used to get the remote request user without going + * through @HttpContext, so that we can minimize code changes. */ @InterfaceAudience.Private public class RESTServletContainer extends ServletContainer { @@ -46,13 +45,12 @@ public RESTServletContainer(ResourceConfig config) { } /** - * This container is used only if authentication and - * impersonation is enabled. The remote request user is used - * as a proxy user for impersonation in invoking any REST service. + * This container is used only if authentication and impersonation is enabled. The remote request + * user is used as a proxy user for impersonation in invoking any REST service. */ @Override - public void service(final HttpServletRequest request, - final HttpServletResponse response) throws ServletException, IOException { + public void service(final HttpServletRequest request, final HttpServletResponse response) + throws ServletException, IOException { final HttpServletRequest lowerCaseRequest = toLowerCase(request); final String doAsUserFromQuery = lowerCaseRequest.getParameter("doas"); RESTServlet servlet = RESTServlet.getInstance(); @@ -69,7 +67,7 @@ public void service(final HttpServletRequest request, // validate the proxy user authorization try { ProxyUsers.authorize(ugi, request.getRemoteAddr(), conf); - } catch(AuthorizationException e) { + } catch (AuthorizationException e) { throw new ServletException(e.getMessage()); } servlet.setEffectiveUser(doAsUserFromQuery); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java index 784894e27571..02197a1515d4 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RegionsResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -67,8 +65,8 @@ public RegionsResource(TableResource tableResource) throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -83,14 +81,14 @@ public Response get(final @Context UriInfo uriInfo) { List locs; try (Connection connection = ConnectionFactory.createConnection(servlet.getConfiguration()); - RegionLocator locator = connection.getRegionLocator(tableName)) { + RegionLocator locator = connection.getRegionLocator(tableName)) { locs = locator.getAllRegionLocations(); } for (HRegionLocation loc : locs) { RegionInfo hri = loc.getRegion(); ServerName addr = loc.getServerName(); model.add(new TableRegionModel(tableName.getNameAsString(), hri.getRegionId(), - hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString())); + hri.getStartKey(), hri.getEndKey(), addr.getAddress().toString())); } ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); @@ -98,14 +96,12 @@ public Response get(final @Context UriInfo uriInfo) { return response.build(); } catch (TableNotFoundException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java index 11e0949711a1..552ca98d2f88 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResourceBase.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
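
// A standalone sketch of the region listing that RegionsResource.get performs above:
// open a Connection, take a RegionLocator for the table, and walk getAllRegionLocations().
// The table name is a placeholder; the API calls are the same ones used in that handler.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListRegionsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("my_table"); // hypothetical table name
    try (Connection connection = ConnectionFactory.createConnection(conf);
        RegionLocator locator = connection.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        // Same fields TableRegionModel is built from: region identity plus hosting server.
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
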
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -33,7 +31,7 @@ public class ResourceBase implements Constants { RESTServlet servlet; - Class accessDeniedClazz; + Class accessDeniedClazz; public ResourceBase() throws IOException { servlet = RESTServlet.getInstance(); @@ -42,44 +40,35 @@ public ResourceBase() throws IOException { } catch (ClassNotFoundException e) { } } - + protected Response processException(Throwable exp) { Throwable curr = exp; - if(accessDeniedClazz != null) { - //some access denied exceptions are buried + if (accessDeniedClazz != null) { + // some access denied exceptions are buried while (curr != null) { - if(accessDeniedClazz.isAssignableFrom(curr.getClass())) { + if (accessDeniedClazz.isAssignableFrom(curr.getClass())) { throw new WebApplicationException( - Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } curr = curr.getCause(); } } - //TableNotFound may also be buried one level deep - if (exp instanceof TableNotFoundException || - exp.getCause() instanceof TableNotFoundException) { + // TableNotFound may also be buried one level deep + if (exp instanceof TableNotFoundException || exp.getCause() instanceof TableNotFoundException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } - if (exp instanceof NoSuchColumnFamilyException){ + if (exp instanceof NoSuchColumnFamilyException) { throw new WebApplicationException( - Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RuntimeException) { throw new WebApplicationException( - Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } if (exp instanceof RetriesExhaustedWithDetailsException) { RetriesExhaustedWithDetailsException retryException = @@ -87,9 +76,7 @@ protected Response processException(Throwable exp) { processException(retryException.getCause(0)); } throw new WebApplicationException( - Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF + - StringUtils.stringifyException(exp) + CRLF) - .build()); + Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF + StringUtils.stringifyException(exp) + CRLF).build()); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java index 41135a814f38..a2d7ab3944a3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java +++ 
b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,19 +19,16 @@ import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.rest.model.ScannerModel; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private public abstract class ResultGenerator implements Iterator { - public static ResultGenerator fromRowSpec(final String table, - final RowSpec rowspec, final Filter filter, final boolean cacheBlocks) - throws IOException { + public static ResultGenerator fromRowSpec(final String table, final RowSpec rowspec, + final Filter filter, final boolean cacheBlocks) throws IOException { if (rowspec.isSingleRow()) { return new RowResultGenerator(table, rowspec, filter, cacheBlocks); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java index 3f5e1e1f6f82..814c72125438 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RootResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -60,15 +58,15 @@ public RootResource() throws IOException { private final TableListModel getTableList() throws IOException { TableListModel tableList = new TableListModel(); TableName[] tableNames = servlet.getAdmin().listTableNames(); - for (TableName name: tableNames) { + for (TableName name : tableNames) { tableList.add(new TableModel(name.getNameAsString())); } return tableList; } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -86,8 +84,7 @@ public Response get(final @Context UriInfo uriInfo) { } @Path("status/cluster") - public StorageClusterStatusResource getClusterStatusResource() - throws IOException { + public StorageClusterStatusResource getClusterStatusResource() throws IOException { return new StorageClusterStatusResource(); } @@ -97,8 +94,7 @@ public VersionResource getVersionResource() throws IOException { } @Path("{table}") - public TableResource getTableResource( - final @PathParam("table") String table) throws IOException { + public TableResource getTableResource(final @PathParam("table") String table) throws IOException { return new TableResource(table); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java index 8761a866c09b..0aabb8c30b7a 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResource.java @@ 
-1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -79,8 +77,8 @@ public class RowResource extends ResourceBase { * @param returnResult * @throws IOException */ - public RowResource(TableResource tableResource, String rowspec, - String versions, String check, String returnResult) throws IOException { + public RowResource(TableResource tableResource, String rowspec, String versions, String check, + String returnResult) throws IOException { super(); this.tableResource = tableResource; this.rowspec = new RowSpec(rowspec); @@ -94,8 +92,7 @@ public RowResource(TableResource tableResource, String rowspec, } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -103,14 +100,12 @@ public Response get(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } int count = 0; CellSetModel model = new CellSetModel(); @@ -124,7 +119,7 @@ public Response get(final @Context UriInfo uriInfo) { rowModel = new RowModel(rowKey); } rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), - value.getTimestamp(), CellUtil.cloneValue(value))); + value.getTimestamp(), CellUtil.cloneValue(value))); if (++count > rowspec.getMaxValues()) { break; } @@ -143,7 +138,7 @@ public Response get(final @Context UriInfo uriInfo) { @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); // doesn't make sense to use a non specific coordinate as this can only @@ -151,24 +146,22 @@ public Response getBinary(final @Context UriInfo uriInfo) { if (!rowspec.hasColumns() || rowspec.getColumns().length > 1) { servlet.getMetrics().incrementFailedGetRequests(1); return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) - .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + - "in the row. Using the 'Accept' header with one of these formats lets you " + - "retrieve the entire row if it has multiple columns: " + - // Same as the @Produces list for the get method. 
- MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + - MIMETYPE_PROTOBUF + ", " + MIMETYPE_PROTOBUF_IETF + - CRLF).build(); + .entity("Bad request: Default 'GET' method only works if there is exactly 1 column " + + "in the row. Using the 'Accept' header with one of these formats lets you " + + "retrieve the entire row if it has multiple columns: " + + // Same as the @Produces list for the get method. + MIMETYPE_XML + ", " + MIMETYPE_JSON + ", " + MIMETYPE_PROTOBUF + ", " + + MIMETYPE_PROTOBUF_IETF + CRLF) + .build(); } MultivaluedMap params = uriInfo.getQueryParameters(); try { - ResultGenerator generator = - ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, null, - !params.containsKey(NOCACHE_PARAM_NAME)); + ResultGenerator generator = ResultGenerator.fromRowSpec(tableResource.getName(), rowspec, + null, !params.containsKey(NOCACHE_PARAM_NAME)); if (!generator.hasNext()) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } Cell value = generator.next(); ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); @@ -185,9 +178,8 @@ Response update(final CellSetModel model, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (CHECK_PUT.equalsIgnoreCase(check)) { @@ -199,29 +191,27 @@ Response update(final CellSetModel model, final boolean replace) { } else if (CHECK_INCREMENT.equalsIgnoreCase(check)) { return increment(model); } else if (check != null && check.length() > 0) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Invalid check value '" + check + "'" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Invalid check value '" + check + "'" + CRLF).build(); } Table table = null; try { List rows = model.getRows(); List puts = new ArrayList<>(); - for (RowModel row: rows) { + for (RowModel row : rows) { byte[] key = row.getKey(); if (key == null) { key = rowspec.getRow(); } if (key == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key not specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key not specified." + CRLF).build(); } Put put = new Put(key); int i = 0; - for (CellModel cell: row.getCells()) { + for (CellModel cell : row.getCells()) { byte[] col = cell.getColumn(); if (col == null) try { col = rowspec.getColumns()[i++]; @@ -230,24 +220,17 @@ Response update(final CellSetModel model, final boolean replace) { } if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); } puts.add(put); if (LOG.isTraceEnabled()) { @@ -272,14 +255,12 @@ Response update(final CellSetModel model, final boolean replace) { } // This currently supports only update of one row at a time. - Response updateBinary(final byte[] message, final HttpHeaders headers, - final boolean replace) { + Response updateBinary(final byte[] message, final HttpHeaders headers, final boolean replace) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } Table table = null; try { @@ -304,25 +285,18 @@ Response updateBinary(final byte[] message, final HttpHeaders headers, } if (column == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } Put put = new Put(row); byte parts[][] = CellUtil.parseColumn(column); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(timestamp) - .setType(Type.Put) - .setValue(message) - .build()); + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(timestamp).setType(Type.Put) + .setValue(message).build()); table = servlet.getTable(tableResource.getName()); table.put(put); if (LOG.isTraceEnabled()) { @@ -343,45 +317,39 @@ Response updateBinary(final byte[] message, final HttpHeaders headers, } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, true); } @PUT @Consumes(MIMETYPE_BINARY) - public Response putBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response putBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as "+ MIMETYPE_BINARY); + LOG.trace("PUT " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, true); } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final CellSetModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final CellSetModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() - + " " + uriInfo.getQueryParameters()); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " " + uriInfo.getQueryParameters()); } return update(model, false); } @POST @Consumes(MIMETYPE_BINARY) - public Response postBinary(final byte[] message, - final @Context UriInfo uriInfo, final @Context HttpHeaders headers) { + public Response postBinary(final byte[] message, final @Context UriInfo uriInfo, + final @Context HttpHeaders headers) { if (LOG.isTraceEnabled()) { - LOG.trace("POST " + uriInfo.getAbsolutePath() + " as "+MIMETYPE_BINARY); + LOG.trace("POST " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } return updateBinary(message, headers, false); } @@ -394,17 +362,14 @@ public Response delete(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + 
.entity("Forbidden" + CRLF).build(); } Delete delete = null; - if (rowspec.hasTimestamp()) - delete = new Delete(rowspec.getRow(), rowspec.getTimestamp()); - else - delete = new Delete(rowspec.getRow()); + if (rowspec.hasTimestamp()) delete = new Delete(rowspec.getRow(), rowspec.getTimestamp()); + else delete = new Delete(rowspec.getRow()); - for (byte[] column: rowspec.getColumns()) { + for (byte[] column : rowspec.getColumns()) { byte[][] split = CellUtil.parseColumn(column); if (rowspec.hasTimestamp()) { if (split.length == 1) { @@ -412,9 +377,8 @@ public Response delete(final @Context UriInfo uriInfo) { } else if (split.length == 2) { delete.addColumns(split[0], split[1], rowspec.getTimestamp()); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } else { if (split.length == 1) { @@ -422,9 +386,8 @@ public Response delete(final @Context UriInfo uriInfo) { } else if (split.length == 2) { delete.addColumns(split[0], split[1]); } else { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } } } @@ -450,9 +413,8 @@ public Response delete(final @Context UriInfo uriInfo) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndPut on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndPut on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -476,9 +438,7 @@ Response checkAndPut(final CellSetModel model) { int cellModelCount = cellModels.size(); if (key == null || cellModelCount <= 1) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response - .status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) .entity( "Bad request: Either row key is null or no data found for columns specified." + CRLF) .build(); @@ -494,34 +454,26 @@ Response checkAndPut(final CellSetModel model) { // Copy all the cells to the Put request // and track if the check cell's latest value is also sent - for (int i = 0, n = cellModelCount - 1; i < n ; i++) { + for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." 
+ CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); } - put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY) - .setRow(put.getRow()) - .setFamily(parts[0]) - .setQualifier(parts[1]) - .setTimestamp(cell.getTimestamp()) - .setType(Type.Put) - .setValue(cell.getValue()) - .build()); - if(Bytes.equals(col, - valueToCheckCell.getColumn())) { + put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) + .setFamily(parts[0]).setQualifier(parts[1]).setTimestamp(cell.getTimestamp()) + .setType(Type.Put).setValue(cell.getValue()).build()); + if (Bytes.equals(col, valueToCheckCell.getColumn())) { valueToPutCell = cell; } } @@ -532,13 +484,12 @@ Response checkAndPut(final CellSetModel model) { .entity("Bad request: The column to put and check do not match." + CRLF).build(); } else { retValue = table.checkAndMutate(key, valueToPutParts[0]).qualifier(valueToPutParts[1]) - .ifEquals(valueToCheckCell.getValue()).thenPut(put); + .ifEquals(valueToCheckCell.getValue()).thenPut(put); } } else { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } if (LOG.isTraceEnabled()) { @@ -546,9 +497,8 @@ Response checkAndPut(final CellSetModel model) { } if (!retValue) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Value not Modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Value not Modified" + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulPutRequests(1); @@ -566,9 +516,8 @@ Response checkAndPut(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes checkAndDelete on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes + * checkAndDelete on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -579,9 +528,8 @@ Response checkAndDelete(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -590,9 +538,8 @@ Response checkAndDelete(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." 
+ CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } List cellModels = rowModel.getCells(); @@ -600,31 +547,29 @@ Response checkAndDelete(final CellSetModel model) { delete = new Delete(key); boolean retValue; - CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount -1); + CellModel valueToDeleteCell = rowModel.getCells().get(cellModelCount - 1); byte[] valueToDeleteColumn = valueToDeleteCell.getColumn(); if (valueToDeleteColumn == null) { try { valueToDeleteColumn = rowspec.getColumns()[0]; } catch (final ArrayIndexOutOfBoundsException e) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column not specified for check." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column not specified for check." + CRLF).build(); } } - byte[][] parts ; + byte[][] parts; // Copy all the cells to the Delete request if extra cells are sent - if(cellModelCount > 1) { + if (cellModelCount > 1) { for (int i = 0, n = cellModelCount - 1; i < n; i++) { CellModel cell = cellModels.get(i); byte[] col = cell.getColumn(); if (col == null) { servlet.getMetrics().incrementFailedPutRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } parts = CellUtil.parseColumn(col); @@ -636,10 +581,8 @@ Response checkAndDelete(final CellSetModel model) { delete.addColumn(parts[0], parts[1], cell.getTimestamp()); } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT) - .entity("Bad request: Column to delete incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to delete incorrectly specified." + CRLF).build(); } } } @@ -649,36 +592,33 @@ Response checkAndDelete(final CellSetModel model) { if (parts[1].length != 0) { // To support backcompat of deleting a cell // if that is the only cell passed to the rest api - if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], parts[1]); } retValue = table.checkAndMutate(key, parts[0]).qualifier(parts[1]) .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); } else { // The case of empty qualifier. - if(cellModelCount == 1) { + if (cellModelCount == 1) { delete.addColumns(parts[0], Bytes.toBytes(StringUtils.EMPTY)); } - retValue = table.checkAndMutate(key, parts[0]) - .ifEquals(valueToDeleteCell.getValue()).thenDelete(delete); + retValue = table.checkAndMutate(key, parts[0]).ifEquals(valueToDeleteCell.getValue()) + .thenDelete(delete); } } else { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column to check incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column to check incorrectly specified." 
+ CRLF).build(); } if (LOG.isTraceEnabled()) { - LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " - + retValue); + LOG.trace("CHECK-AND-DELETE " + delete.toString() + ", returns " + retValue); } if (!retValue) { servlet.getMetrics().incrementFailedDeleteRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity(" Delete check failed." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity(" Delete check failed." + CRLF).build(); } ResponseBuilder response = Response.ok(); servlet.getMetrics().incrementSucessfulDeleteRequests(1); @@ -696,9 +636,8 @@ Response checkAndDelete(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Append on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Append on + * HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -709,9 +648,8 @@ Response append(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -720,15 +658,14 @@ Response append(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } append = new Append(key); append.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -739,16 +676,14 @@ Response append(final CellSetModel model) { } if (col == null) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." + CRLF).build(); } append.add(parts[0], parts[1], cell.getValue()); } @@ -760,16 +695,15 @@ Response append(final CellSetModel model) { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedAppendRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Append return empty." 
+ CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Append return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rRowModel); servlet.getMetrics().incrementSucessfulAppendRequests(1); @@ -790,9 +724,8 @@ Response append(final CellSetModel model) { } /** - * Validates the input request parameters, parses columns from CellSetModel, - * and invokes Increment on HTable. - * + * Validates the input request parameters, parses columns from CellSetModel, and invokes Increment + * on HTable. * @param model instance of CellSetModel * @return Response 200 OK, 304 Not modified, 400 Bad request */ @@ -803,9 +736,8 @@ Response increment(final CellSetModel model) { table = servlet.getTable(tableResource.getName()); if (model.getRows().size() != 1) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Number of rows specified is not 1." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Number of rows specified is not 1." + CRLF).build(); } RowModel rowModel = model.getRows().get(0); byte[] key = rowModel.getKey(); @@ -814,15 +746,14 @@ Response increment(final CellSetModel model) { } if (key == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Row key found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Row key found to be null." + CRLF).build(); } increment = new Increment(key); increment.setReturnResults(returnResult); int i = 0; - for (CellModel cell: rowModel.getCells()) { + for (CellModel cell : rowModel.getCells()) { byte[] col = cell.getColumn(); if (col == null) { try { @@ -833,18 +764,17 @@ Response increment(final CellSetModel model) { } if (col == null) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column found to be null." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column found to be null." + CRLF).build(); } - byte [][] parts = CellUtil.parseColumn(col); + byte[][] parts = CellUtil.parseColumn(col); if (parts.length != 2) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request: Column incorrectly specified." + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request: Column incorrectly specified." 
+ CRLF).build(); } - increment.addColumn(parts[0], parts[1], Long.parseLong(Bytes.toStringBinary(cell.getValue()))); + increment.addColumn(parts[0], parts[1], + Long.parseLong(Bytes.toStringBinary(cell.getValue()))); } if (LOG.isDebugEnabled()) { @@ -855,16 +785,15 @@ Response increment(final CellSetModel model) { if (returnResult) { if (result.isEmpty()) { servlet.getMetrics().incrementFailedIncrementRequests(1); - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Increment return empty." + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Increment return empty." + CRLF).build(); } CellSetModel rModel = new CellSetModel(); RowModel rRowModel = new RowModel(result.getRow()); for (Cell cell : result.listCells()) { rRowModel.addCell(new CellModel(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), - cell.getTimestamp(), CellUtil.cloneValue(cell))); + cell.getTimestamp(), CellUtil.cloneValue(cell))); } rModel.addRow(rowModel); servlet.getMetrics().incrementSucessfulIncrementRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java index f3d48fd49f07..36d6fba08d86 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -21,7 +20,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.NoSuchElementException; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.DoNotRetryIOException; @@ -30,11 +28,8 @@ import org.apache.hadoop.hbase.client.Table; import org.apache.hadoop.hbase.filter.Filter; import org.apache.hadoop.hbase.security.AccessDeniedException; - import org.apache.hadoop.util.StringUtils; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,9 +40,8 @@ public class RowResultGenerator extends ResultGenerator { private Iterator valuesI; private Cell cache; - public RowResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public RowResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { try (Table table = RESTServlet.getInstance().getTable(tableName)) { Get get = new Get(rowspec.getRow()); if (rowspec.hasColumns()) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java index c510c9ed797d..2798544406d8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/RowSpec.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
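Note on the RowResource changes above: the reformatted builder chains are exactly the plain HBase client calls the resource issues for checkAndPut. A minimal standalone sketch of that call pattern, assuming a reachable cluster; the table name "mytable", row, column and values are illustrative and not taken from this patch:

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellBuilderFactory;
    import org.apache.hadoop.hbase.CellBuilderType;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CheckAndPutSketch {
      public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] family = Bytes.toBytes("cf");
        byte[] qualifier = Bytes.toBytes("a");
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("mytable"))) {
          Put put = new Put(row);
          // Same builder chain the resource uses to copy a CellModel into the Put.
          put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow())
            .setFamily(family).setQualifier(qualifier).setTimestamp(System.currentTimeMillis())
            .setType(Cell.Type.Put).setValue(Bytes.toBytes("new-value")).build());
          // Atomic check-and-put: the Put is applied only if the current cell value matches.
          boolean applied = table.checkAndMutate(row, family).qualifier(qualifier)
            .ifEquals(Bytes.toBytes("old-value")).thenPut(put);
          System.out.println("applied = " + applied);
        }
      }
    }

The checkAndMutate(row, family) builder is the same one checkAndPut and checkAndDelete use above; only the terminal thenPut/thenDelete differs.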
*/ - package org.apache.hadoop.hbase.rest; import java.io.UnsupportedEncodingException; @@ -26,22 +24,19 @@ import java.util.Collections; import java.util.List; import java.util.TreeSet; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; /** - * Parses a path based row/column/timestamp specification into its component - * elements. + * Parses a path based row/column/timestamp specification into its component elements. *

    - * */ @InterfaceAudience.Private public class RowSpec { public static final long DEFAULT_START_TIMESTAMP = 0; public static final long DEFAULT_END_TIMESTAMP = Long.MAX_VALUE; - + private byte[] row = HConstants.EMPTY_START_ROW; private byte[] endRow = null; private TreeSet columns = new TreeSet<>(Bytes.BYTES_COMPARATOR); @@ -62,8 +57,7 @@ public RowSpec(String path) throws IllegalArgumentException { i = parseQueryParams(path, i); } - private int parseRowKeys(final String path, int i) - throws IllegalArgumentException { + private int parseRowKeys(final String path, int i) throws IllegalArgumentException { String startRow = null, endRow = null; try { StringBuilder sb = new StringBuilder(); @@ -76,10 +70,8 @@ private int parseRowKeys(final String path, int i) String row = startRow = sb.toString(); int idx = startRow.indexOf(','); if (idx != -1) { - startRow = URLDecoder.decode(row.substring(0, idx), - HConstants.UTF8_ENCODING); - endRow = URLDecoder.decode(row.substring(idx + 1), - HConstants.UTF8_ENCODING); + startRow = URLDecoder.decode(row.substring(0, idx), HConstants.UTF8_ENCODING); + endRow = URLDecoder.decode(row.substring(idx + 1), HConstants.UTF8_ENCODING); } else { startRow = URLDecoder.decode(row, HConstants.UTF8_ENCODING); } @@ -93,13 +85,11 @@ private int parseRowKeys(final String path, int i) // table scanning if (startRow.charAt(startRow.length() - 1) == '*') { if (endRow != null) - throw new IllegalArgumentException("invalid path: start row "+ - "specified with wildcard"); - this.row = Bytes.toBytes(startRow.substring(0, - startRow.lastIndexOf("*"))); + throw new IllegalArgumentException("invalid path: start row " + "specified with wildcard"); + this.row = Bytes.toBytes(startRow.substring(0, startRow.lastIndexOf("*"))); this.endRow = new byte[this.row.length + 1]; System.arraycopy(this.row, 0, this.endRow, 0, this.row.length); - this.endRow[this.row.length] = (byte)255; + this.endRow[this.row.length] = (byte) 255; } else { this.row = Bytes.toBytes(startRow.toString()); if (endRow != null) { @@ -145,8 +135,7 @@ private int parseColumns(final String path, int i) throws IllegalArgumentExcepti return i; } - private int parseTimestamp(final String path, int i) - throws IllegalArgumentException { + private int parseTimestamp(final String path, int i) throws IllegalArgumentException { if (i >= path.length()) { return i; } @@ -163,8 +152,7 @@ private int parseTimestamp(final String path, int i) i++; } try { - time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time0 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -176,8 +164,7 @@ private int parseTimestamp(final String path, int i) i++; } try { - time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), - HConstants.UTF8_ENCODING)); + time1 = Long.parseLong(URLDecoder.decode(stamp.toString(), HConstants.UTF8_ENCODING)); } catch (NumberFormatException e) { throw new IllegalArgumentException(e); } @@ -206,8 +193,7 @@ private int parseQueryParams(final String path, int i) { } StringBuilder query = new StringBuilder(); try { - query.append(URLDecoder.decode(path.substring(i), - HConstants.UTF8_ENCODING)); + query.append(URLDecoder.decode(path.substring(i), HConstants.UTF8_ENCODING)); } catch (UnsupportedEncodingException e) { // should not happen throw new RuntimeException(e); @@ -234,39 +220,41 @@ private int parseQueryParams(final String path, int i) { break; } switch (what) { - 
case 'm': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + case 'm': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxVersions = Integer.parseInt(sb.toString()); } - maxVersions = Integer.parseInt(sb.toString()); - } break; - case 'n': { - StringBuilder sb = new StringBuilder(); - while (j <= query.length()) { - c = query.charAt(j); - if (c < '0' || c > '9') { - j--; - break; + break; + case 'n': { + StringBuilder sb = new StringBuilder(); + while (j <= query.length()) { + c = query.charAt(j); + if (c < '0' || c > '9') { + j--; + break; + } + sb.append(c); } - sb.append(c); + maxValues = Integer.parseInt(sb.toString()); } - maxValues = Integer.parseInt(sb.toString()); - } break; - default: - throw new IllegalArgumentException("unknown parameter '" + c + "'"); + break; + default: + throw new IllegalArgumentException("unknown parameter '" + c + "'"); } } return i; } - public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, - long startTime, long endTime, int maxVersions) { + public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, long startTime, long endTime, + int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -277,15 +265,16 @@ public RowSpec(byte[] startRow, byte[] endRow, byte[][] columns, this.maxVersions = maxVersions; } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions, Collection labels) { + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions, Collection labels) { this(startRow, endRow, columns, startTime, endTime, maxVersions); - if(labels != null) { + if (labels != null) { this.labels.addAll(labels); } } - public RowSpec(byte[] startRow, byte[] endRow, Collection columns, - long startTime, long endTime, int maxVersions) { + + public RowSpec(byte[] startRow, byte[] endRow, Collection columns, long startTime, + long endTime, int maxVersions) { this.row = startRow; this.endRow = endRow; if (columns != null) { @@ -319,7 +308,7 @@ public void setMaxValues(final int maxValues) { public boolean hasColumns() { return !columns.isEmpty(); } - + public boolean hasLabels() { return !labels.isEmpty(); } @@ -347,7 +336,7 @@ public void addColumn(final byte[] column) { public byte[][] getColumns() { return columns.toArray(new byte[columns.size()][]); } - + public List getLabels() { return labels; } @@ -384,11 +373,11 @@ public String toString() { result.append(Bytes.toString(row)); } result.append("', endRow => '"); - if (endRow != null) { + if (endRow != null) { result.append(Bytes.toString(endRow)); } result.append("', columns => ["); - for (byte[] col: columns) { + for (byte[] col : columns) { result.append(" '"); result.append(Bytes.toString(col)); result.append("'"); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java index 4bbc2cf11261..bcc2ac49ddc3 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerInstanceResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
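Note on the RowSpec reformatting just above: a RowSpec packages a start/end row, column list, time range and max-versions. Besides the path-parsing constructor it can be built directly; a small sketch using only the constructor and accessors visible in this patch (row names and columns are made up, and the class lives in the hbase-rest module, so that jar must be on the classpath):

    import org.apache.hadoop.hbase.rest.RowSpec;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RowSpecSketch {
      public static void main(String[] args) {
        byte[][] columns = { Bytes.toBytes("cf:a"), Bytes.toBytes("cf:b") };
        // startRow, endRow, columns, startTime, endTime, maxVersions
        RowSpec spec = new RowSpec(Bytes.toBytes("row1"), Bytes.toBytes("row9"), columns,
          0L, Long.MAX_VALUE, 1);
        System.out.println(spec);                        // toString() prints row, endRow, columns, timestamps
        System.out.println("has columns: " + spec.hasColumns());
      }
    }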
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -44,8 +42,7 @@ @InterfaceAudience.Private public class ScannerInstanceResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerInstanceResource.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerInstanceResource.class); static CacheControl cacheControl; static { @@ -58,29 +55,28 @@ public class ScannerInstanceResource extends ResourceBase { String id = null; int batch = 1; - public ScannerInstanceResource() throws IOException { } + public ScannerInstanceResource() throws IOException { + } - public ScannerInstanceResource(String table, String id, - ResultGenerator generator, int batch) throws IOException { + public ScannerInstanceResource(String table, String id, ResultGenerator generator, int batch) + throws IOException { this.id = id; this.generator = generator; this.batch = batch; } @GET - @Produces({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context UriInfo uriInfo, - @QueryParam("n") int maxRows, final @QueryParam("c") int maxValues) { + @Produces({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context UriInfo uriInfo, @QueryParam("n") int maxRows, + final @QueryParam("c") int maxValues) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); if (generator == null) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } else { // Updated the connection access time for each client next() call RESTServlet.getInstance().getConnectionCache().updateConnectionAccessTime(); @@ -104,15 +100,13 @@ public Response get(final @Context UriInfo uriInfo, servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) - .build(); + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) + .build(); } catch (IllegalArgumentException e) { Throwable t = e.getCause(); if (t instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } throw e; } @@ -144,8 +138,7 @@ public Response get(final @Context UriInfo uriInfo, rowKey = CellUtil.cloneRow(value); rowModel = new RowModel(rowKey); } - rowModel.addCell( - new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), + rowModel.addCell(new CellModel(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value), value.getTimestamp(), CellUtil.cloneValue(value))); } while (--count > 0); model.addRow(rowModel); @@ -159,8 +152,7 @@ public Response get(final @Context UriInfo uriInfo, @Produces(MIMETYPE_BINARY) public Response getBinary(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + 
- MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { @@ -173,10 +165,10 @@ public Response getBinary(final @Context UriInfo uriInfo) { } ResponseBuilder response = Response.ok(CellUtil.cloneValue(value)); response.cacheControl(cacheControl); - response.header("X-Row", Bytes.toString(Base64.getEncoder().encode( - CellUtil.cloneRow(value)))); + response.header("X-Row", + Bytes.toString(Base64.getEncoder().encode(CellUtil.cloneRow(value)))); response.header("X-Column", Bytes.toString(Base64.getEncoder().encode( - CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); + CellUtil.makeColumn(CellUtil.cloneFamily(value), CellUtil.cloneQualifier(value))))); response.header("X-Timestamp", value.getTimestamp()); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -187,9 +179,8 @@ public Response getBinary(final @Context UriInfo uriInfo) { servlet.getMetrics().incrementFailedDeleteRequests(1); } servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.GONE) - .type(MIMETYPE_TEXT).entity("Gone" + CRLF) - .build(); + return Response.status(Response.Status.GONE).type(MIMETYPE_TEXT).entity("Gone" + CRLF) + .build(); } } @@ -200,9 +191,8 @@ public Response delete(final @Context UriInfo uriInfo) { } servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } if (ScannerResource.delete(id)) { servlet.getMetrics().incrementSucessfulDeleteRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java index 785c7c4b34a4..eeef08e1b761 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
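Note on ScannerInstanceResource's binary GET above: it returns one cell per request and ships the cell coordinates in Base64-encoded X-Row and X-Column headers, so a REST client decodes them symmetrically. A tiny self-contained sketch; the header values here are fabricated for illustration rather than read from a live response:

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class ScannerHeaderDecodeSketch {
      public static void main(String[] args) {
        // Stand-ins for the X-Row / X-Column header values a client would receive.
        String xRow = Base64.getEncoder().encodeToString("row1".getBytes(StandardCharsets.UTF_8));
        String xColumn = Base64.getEncoder().encodeToString("cf:a".getBytes(StandardCharsets.UTF_8));

        byte[] row = Base64.getDecoder().decode(xRow);
        byte[] column = Base64.getDecoder().decode(xColumn);
        System.out.println(new String(row, StandardCharsets.UTF_8) + " / "
          + new String(column, StandardCharsets.UTF_8));
      }
    }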
*/ - package org.apache.hadoop.hbase.rest; import com.fasterxml.jackson.core.JsonParseException; @@ -48,8 +46,8 @@ public class ScannerResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(ScannerResource.class); - static final Map scanners = - Collections.synchronizedMap(new HashMap()); + static final Map scanners = + Collections.synchronizedMap(new HashMap()); TableResource tableResource; @@ -58,7 +56,7 @@ public class ScannerResource extends ResourceBase { * @param tableResource * @throws IOException */ - public ScannerResource(TableResource tableResource)throws IOException { + public ScannerResource(TableResource tableResource) throws IOException { super(); this.tableResource = tableResource; } @@ -73,13 +71,11 @@ static boolean delete(final String id) { } } - Response update(final ScannerModel model, final boolean replace, - final UriInfo uriInfo) { + Response update(final ScannerModel model, final boolean replace, final UriInfo uriInfo) { servlet.getMetrics().incrementRequests(1); if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } byte[] endRow = model.hasEndRow() ? model.getEndRow() : null; RowSpec spec = null; @@ -94,12 +90,11 @@ Response update(final ScannerModel model, final boolean replace, try { Filter filter = ScannerResultGenerator.buildFilterFromModel(model); String tableName = tableResource.getName(); - ScannerResultGenerator gen = - new ScannerResultGenerator(tableName, spec, filter, model.getCaching(), - model.getCacheBlocks()); + ScannerResultGenerator gen = new ScannerResultGenerator(tableName, spec, filter, + model.getCaching(), model.getCacheBlocks()); String id = gen.getID(); ScannerInstanceResource instance = - new ScannerInstanceResource(tableName, id, gen, model.getBatch()); + new ScannerInstanceResource(tableName, id, gen, model.getBatch()); scanners.put(id, instance); if (LOG.isTraceEnabled()) { LOG.trace("new scanner: " + id); @@ -112,26 +107,21 @@ Response update(final ScannerModel model, final boolean replace, LOG.error("Exception occurred while processing " + uriInfo.getAbsolutePath() + " : ", e); servlet.getMetrics().incrementFailedPutRequests(1); if (e instanceof TableNotFoundException) { - return Response.status(Response.Status.NOT_FOUND) - .type(MIMETYPE_TEXT).entity("Not found" + CRLF) - .build(); + return Response.status(Response.Status.NOT_FOUND).type(MIMETYPE_TEXT) + .entity("Not found" + CRLF).build(); } else if (e instanceof RuntimeException || e instanceof JsonMappingException | e instanceof JsonParseException) { - return Response.status(Response.Status.BAD_REQUEST) - .type(MIMETYPE_TEXT).entity("Bad request" + CRLF) - .build(); - } - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.BAD_REQUEST).type(MIMETYPE_TEXT) + .entity("Bad request" + CRLF).build(); + } + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final ScannerModel model, final 
@Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -139,10 +129,8 @@ public Response put(final ScannerModel model, } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final ScannerModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final ScannerModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("POST " + uriInfo.getAbsolutePath()); } @@ -150,8 +138,8 @@ public Response post(final ScannerModel model, } @Path("{scanner: .+}") - public ScannerInstanceResource getScannerInstanceResource( - final @PathParam("scanner") String id) throws IOException { + public ScannerInstanceResource getScannerInstanceResource(final @PathParam("scanner") String id) + throws IOException { ScannerInstanceResource instance = scanners.get(id); if (instance == null) { servlet.getMetrics().incrementFailedGetRequests(1); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java index d31b4b1a8071..8a31a51fe972 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/ScannerResultGenerator.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; import java.util.Iterator; - import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.TableNotEnabledException; @@ -43,11 +40,9 @@ @InterfaceAudience.Private public class ScannerResultGenerator extends ResultGenerator { - private static final Logger LOG = - LoggerFactory.getLogger(ScannerResultGenerator.class); + private static final Logger LOG = LoggerFactory.getLogger(ScannerResultGenerator.class); - public static Filter buildFilterFromModel(final ScannerModel model) - throws Exception { + public static Filter buildFilterFromModel(final ScannerModel model) throws Exception { String filter = model.getFilter(); if (filter == null || filter.length() == 0) { return null; @@ -61,15 +56,13 @@ public static Filter buildFilterFromModel(final ScannerModel model) private ResultScanner scanner; private Result cached; - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final boolean cacheBlocks) throws IllegalArgumentException, IOException { this(tableName, rowspec, filter, -1, cacheBlocks); } - public ScannerResultGenerator(final String tableName, final RowSpec rowspec, - final Filter filter, final int caching, final boolean cacheBlocks) - throws IllegalArgumentException, IOException { + public ScannerResultGenerator(final String tableName, final RowSpec rowspec, final Filter filter, + final int caching, final boolean cacheBlocks) throws IllegalArgumentException, IOException { Table table = RESTServlet.getInstance().getTable(tableName); try { Scan scan; @@ -80,7 +73,7 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } if (rowspec.hasColumns()) { byte[][] columns = rowspec.getColumns(); - for (byte[] column: columns) { + for (byte[] column : columns) { byte[][] split = CellUtil.parseColumn(column); if (split.length == 1) { scan.addFamily(split[0]); @@ -91,12 +84,12 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } } } - scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); + scan.setTimeRange(rowspec.getStartTime(), rowspec.getEndTime()); scan.setMaxVersions(rowspec.getMaxVersions()); if (filter != null) { scan.setFilter(filter); } - if (caching > 0 ) { + if (caching > 0) { scan.setCaching(caching); } scan.setCacheBlocks(cacheBlocks); @@ -105,8 +98,8 @@ public ScannerResultGenerator(final String tableName, final RowSpec rowspec, } scanner = table.getScanner(scan); cached = null; - id = Long.toString(EnvironmentEdgeManager.currentTime()) + - Integer.toHexString(scanner.hashCode()); + id = Long.toString(EnvironmentEdgeManager.currentTime()) + + Integer.toHexString(scanner.hashCode()); } finally { table.close(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java index b0fc02760180..519f4d8373b2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/SchemaResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
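Note on ScannerResultGenerator above: it turns a RowSpec into a client Scan, where a one-part column becomes addFamily and a two-part column becomes addColumn, then the time range, caching and block-cache flags are applied. The same construction in isolation, with made-up rows and columns:

    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ScanFromSpecSketch {
      public static void main(String[] args) throws Exception {
        byte[][] columns = { Bytes.toBytes("cf"), Bytes.toBytes("cf:a") };
        Scan scan = new Scan().withStartRow(Bytes.toBytes("row1")).withStopRow(Bytes.toBytes("row9"));
        for (byte[] column : columns) {
          byte[][] split = CellUtil.parseColumn(column);
          if (split.length == 1) {
            scan.addFamily(split[0]);           // bare family, e.g. "cf"
          } else if (split.length == 2) {
            scan.addColumn(split[0], split[1]); // family:qualifier, e.g. "cf:a"
          }
        }
        scan.setTimeRange(0L, Long.MAX_VALUE);
        scan.setCaching(100);
        scan.setCacheBlocks(false);
        System.out.println(scan);
      }
    }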
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -72,8 +70,7 @@ public SchemaResource(TableResource tableResource) throws IOException { this.tableResource = tableResource; } - private HTableDescriptor getTableSchema() throws IOException, - TableNotFoundException { + private HTableDescriptor getTableSchema() throws IOException, TableNotFoundException { Table table = servlet.getTable(tableResource.getName()); try { return table.getTableDescriptor(); @@ -83,16 +80,15 @@ private HTableDescriptor getTableSchema() throws IOException, } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ResponseBuilder response = - Response.ok(new TableSchemaModel(getTableSchema())); + ResponseBuilder response = Response.ok(new TableSchemaModel(getTableSchema())); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); @@ -105,18 +101,17 @@ public Response get(final @Context UriInfo uriInfo) { private Response replace(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { HTableDescriptor htd = new HTableDescriptor(name); - for (Map.Entry e: model.getAny().entrySet()) { + for (Map.Entry e : model.getAny().entrySet()) { htd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } - for (ColumnSchemaModel family: model.getColumns()) { + for (ColumnSchemaModel family : model.getColumns()) { HColumnDescriptor hcd = new HColumnDescriptor(family.getName()); - for (Map.Entry e: family.getAny().entrySet()) { + for (Map.Entry e : family.getAny().entrySet()) { hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } htd.addFamily(hcd); @@ -131,9 +126,8 @@ private Response replace(final TableName name, final TableSchemaModel model, servlet.getMetrics().incrementSucessfulPutRequests(1); } catch (TableExistsException e) { // race, someone else created a table with the same name - return Response.status(Response.Status.NOT_MODIFIED) - .type(MIMETYPE_TEXT).entity("Not modified" + CRLF) - .build(); + return Response.status(Response.Status.NOT_MODIFIED).type(MIMETYPE_TEXT) + .entity("Not modified" + CRLF).build(); } return Response.created(uriInfo.getAbsolutePath()).build(); } catch (Exception e) { @@ -143,20 +137,19 @@ private Response replace(final TableName name, final TableSchemaModel model, } } - private Response update(final TableName name, final TableSchemaModel model, - final UriInfo uriInfo, final Admin admin) { + private Response update(final TableName name, final TableSchemaModel model, final UriInfo uriInfo, + final Admin admin) { if (servlet.isReadOnly()) { - return Response.status(Response.Status.FORBIDDEN) - .type(MIMETYPE_TEXT).entity("Forbidden" + CRLF) - .build(); + return 
Response.status(Response.Status.FORBIDDEN).type(MIMETYPE_TEXT) + .entity("Forbidden" + CRLF).build(); } try { HTableDescriptor htd = admin.getTableDescriptor(name); admin.disableTable(name); try { - for (ColumnSchemaModel family: model.getColumns()) { + for (ColumnSchemaModel family : model.getColumns()) { HColumnDescriptor hcd = new HColumnDescriptor(family.getName()); - for (Map.Entry e: family.getAny().entrySet()) { + for (Map.Entry e : family.getAny().entrySet()) { hcd.setValue(e.getKey().getLocalPart(), e.getValue().toString()); } if (htd.hasFamily(hcd.getName())) { @@ -166,9 +159,8 @@ private Response update(final TableName name, final TableSchemaModel model, } } } catch (IOException e) { - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } finally { admin.enableTable(TableName.valueOf(tableResource.getName())); } @@ -201,10 +193,8 @@ private Response update(final TableSchemaModel model, final boolean replace, } @PUT - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response put(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response put(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -213,10 +203,8 @@ public Response put(final TableSchemaModel model, } @POST - @Consumes({MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response post(final TableSchemaModel model, - final @Context UriInfo uriInfo) { + @Consumes({ MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, MIMETYPE_PROTOBUF_IETF }) + public Response post(final TableSchemaModel model, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("PUT " + uriInfo.getAbsolutePath()); } @@ -224,8 +212,8 @@ public Response post(final TableSchemaModel model, return update(model, false, uriInfo); } - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="DE_MIGHT_IGNORE", - justification="Expected") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "DE_MIGHT_IGNORE", + justification = "Expected") @DELETE public Response delete(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { @@ -240,7 +228,8 @@ public Response delete(final @Context UriInfo uriInfo) { Admin admin = servlet.getAdmin(); try { admin.disableTable(TableName.valueOf(tableResource.getName())); - } catch (TableNotEnabledException e) { /* this is what we want anyway */ } + } catch (TableNotEnabledException e) { + /* this is what we want anyway */ } admin.deleteTable(TableName.valueOf(tableResource.getName())); servlet.getMetrics().incrementSucessfulDeleteRequests(1); return Response.ok().build(); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java index 0947058aaa05..1f69505f398e 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterStatusResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
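Note on SchemaResource above: it materializes a TableSchemaModel into an HTableDescriptor/HColumnDescriptor pair and hands it to Admin. Roughly the same flow without the model layer, assuming a reachable cluster; the table name, family and attribute value are placeholders, not part of this patch:

    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("mytable"));
        HColumnDescriptor hcd = new HColumnDescriptor("cf");
        hcd.setValue("VERSIONS", "3");   // schema attributes travel as string key/value pairs
        htd.addFamily(hcd);
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
          if (!admin.tableExists(htd.getTableName())) {
            admin.createTable(htd);
          }
        }
      }
    }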
See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -43,8 +41,7 @@ @InterfaceAudience.Private public class StorageClusterStatusResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterStatusResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterStatusResource.class); static CacheControl cacheControl; static { @@ -62,46 +59,41 @@ public StorageClusterStatusResource() throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } servlet.getMetrics().incrementRequests(1); try { - ClusterMetrics status = servlet.getAdmin().getClusterMetrics( - EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); + ClusterMetrics status = servlet.getAdmin() + .getClusterMetrics(EnumSet.of(Option.LIVE_SERVERS, Option.DEAD_SERVERS)); StorageClusterStatusModel model = new StorageClusterStatusModel(); model.setRegions(status.getRegionCount()); model.setRequests(status.getRequestCount()); model.setAverageLoad(status.getAverageLoad()); - for (Map.Entry entry: status.getLiveServerMetrics().entrySet()) { + for (Map.Entry entry : status.getLiveServerMetrics().entrySet()) { ServerName sn = entry.getKey(); ServerMetrics load = entry.getValue(); StorageClusterStatusModel.Node node = - model.addLiveNode( - sn.getHostname() + ":" + - Integer.toString(sn.getPort()), - sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), - (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); + model.addLiveNode(sn.getHostname() + ":" + Integer.toString(sn.getPort()), + sn.getStartcode(), (int) load.getUsedHeapSize().get(Size.Unit.MEGABYTE), + (int) load.getMaxHeapSize().get(Size.Unit.MEGABYTE)); node.setRequests(load.getRequestCount()); - for (RegionMetrics region: load.getRegionMetrics().values()) { - node.addRegion(region.getRegionName(), region.getStoreCount(), - region.getStoreFileCount(), + for (RegionMetrics region : load.getRegionMetrics().values()) { + node.addRegion(region.getRegionName(), region.getStoreCount(), region.getStoreFileCount(), (int) region.getStoreFileSize().get(Size.Unit.MEGABYTE), (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE), (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE), - region.getReadRequestCount(), - region.getWriteRequestCount(), + region.getReadRequestCount(), region.getWriteRequestCount(), (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE), (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE), (int) region.getBloomFilterSize().get(Size.Unit.KILOBYTE), - region.getCompactingCellCount(), - region.getCompactedCellCount()); + region.getCompactingCellCount(), region.getCompactedCellCount()); } } - for (ServerName name: status.getDeadServerNames()) { + for (ServerName name : status.getDeadServerNames()) { model.addDeadNode(name.toString()); } ResponseBuilder response = Response.ok(model); @@ -110,9 +102,8 @@ public Response get(final @Context UriInfo uriInfo) { return response.build(); } catch (IOException e) { 
servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java index ffa17e442394..7cfc52e4d9ad 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/StorageClusterVersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -37,8 +35,7 @@ @InterfaceAudience.Private public class StorageClusterVersionResource extends ResourceBase { - private static final Logger LOG = - LoggerFactory.getLogger(StorageClusterVersionResource.class); + private static final Logger LOG = LoggerFactory.getLogger(StorageClusterVersionResource.class); static CacheControl cacheControl; static { @@ -56,7 +53,7 @@ public StorageClusterVersionResource() throws IOException { } @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON}) + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON }) public Response get(final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); @@ -65,17 +62,15 @@ public Response get(final @Context UriInfo uriInfo) { try { StorageClusterVersionModel model = new StorageClusterVersionModel(); model.setVersion( - servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)) - .getHBaseVersion()); + servlet.getAdmin().getClusterMetrics(EnumSet.of(Option.HBASE_VERSION)).getHBaseVersion()); ResponseBuilder response = Response.ok(model); response.cacheControl(cacheControl); servlet.getMetrics().incrementSucessfulGetRequests(1); return response.build(); } catch (IOException e) { servlet.getMetrics().incrementFailedGetRequests(1); - return Response.status(Response.Status.SERVICE_UNAVAILABLE) - .type(MIMETYPE_TEXT).entity("Unavailable" + CRLF) - .build(); + return Response.status(Response.Status.SERVICE_UNAVAILABLE).type(MIMETYPE_TEXT) + .entity("Unavailable" + CRLF).build(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java index 7f62ef99f1d6..17a24f8fea45 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
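
The two resources reformatted above serve the storage cluster status and version over the negotiated MIME types. A hedged client-side sketch: the "/status/cluster" and "/version/cluster" paths, the single-argument Client(Cluster) constructor, and Cluster#add(host, port) are assumptions (only remove(host, port) appears in this patch), and "localhost:8080" is a placeholder gateway address:

```java
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class ClusterStatusGet {
  public static void main(String[] args) throws Exception {
    Cluster cluster = new Cluster();
    cluster.add("localhost", 8080); // placeholder REST gateway
    Client client = new Client(cluster);
    // Both endpoints honour the Accept header negotiated by the @Produces lists above.
    Response status = client.get("/status/cluster", Constants.MIMETYPE_JSON);
    Response version = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
    System.out.println("status  -> HTTP " + status.getCode());
    System.out.println("version -> HTTP " + version.getCode());
  }
}
```
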
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -100,10 +98,9 @@ public MultiRowResource getMultipleRowResource(final @QueryParam("v") String ver public RowResource getRowResource( // We need the @Encoded decorator so Jersey won't urldecode before // the RowSpec constructor has a chance to parse - final @PathParam("rowspec") @Encoded String rowspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, - final @QueryParam("rr") String returnResult) throws IOException { + final @PathParam("rowspec") @Encoded String rowspec, final @QueryParam("v") String versions, + final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) + throws IOException { return new RowResource(this, rowspec, versions, check, returnResult); } @@ -112,17 +109,15 @@ public RowResource getRowResourceWithSuffixGlobbing( // We need the @Encoded decorator so Jersey won't urldecode before // the RowSpec constructor has a chance to parse final @PathParam("suffixglobbingspec") @Encoded String suffixglobbingspec, - final @QueryParam("v") String versions, - final @QueryParam("check") String check, + final @QueryParam("v") String versions, final @QueryParam("check") String check, final @QueryParam("rr") String returnResult) throws IOException { return new RowResource(this, suffixglobbingspec, versions, check, returnResult); } @Path("{scanspec: .*[*]$}") - public TableScanResource getScanResource( - final @PathParam("scanspec") String scanSpec, - @DefaultValue(Integer.MAX_VALUE + "") - @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, + public TableScanResource getScanResource(final @PathParam("scanspec") String scanSpec, + @DefaultValue(Integer.MAX_VALUE + + "") @QueryParam(Constants.SCAN_LIMIT) int userRequestedLimit, @DefaultValue("") @QueryParam(Constants.SCAN_START_ROW) String startRow, @DefaultValue("") @QueryParam(Constants.SCAN_END_ROW) String endRow, @QueryParam(Constants.SCAN_COLUMN) List column, @@ -159,7 +154,7 @@ public TableScanResource getScanResource( } tableScan.setStopRow(Bytes.toBytes(endRow)); for (String col : column) { - byte [][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); + byte[][] parts = CellUtil.parseColumn(Bytes.toBytes(col.trim())); if (parts.length == 1) { if (LOG.isTraceEnabled()) { LOG.trace("Scan family : " + Bytes.toStringBinary(parts[0])); @@ -167,8 +162,8 @@ public TableScanResource getScanResource( tableScan.addFamily(parts[0]); } else if (parts.length == 2) { if (LOG.isTraceEnabled()) { - LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) - + " " + Bytes.toStringBinary(parts[1])); + LOG.trace("Scan family and column : " + Bytes.toStringBinary(parts[0]) + " " + + Bytes.toStringBinary(parts[1])); } tableScan.addColumn(parts[0], parts[1]); } else { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java index d31a346757b8..cadddc54dc13 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/TableScanResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
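
TableResource.getScanResource above binds the stateless-scan query parameters (limit, start and end row, columns) via the Constants keys. A hedged sketch of issuing such a scan through the REST client: the table name, row keys, and gateway address are placeholders, the trailing "*" selecting the scan resource mirrors the "{scanspec: .*[*]$}" mapping shown, and the Client(Cluster)/Cluster#add(host, port) calls are assumed as in the earlier example:

```java
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class StatelessScanGet {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // placeholder gateway
    // Query parameters mirror the @QueryParam bindings of getScanResource above.
    String path = "/example_table/*" // placeholder table; trailing '*' selects the scan resource
        + "?" + Constants.SCAN_LIMIT + "=10"
        + "&" + Constants.SCAN_START_ROW + "=row-000"
        + "&" + Constants.SCAN_END_ROW + "=row-100"
        + "&" + Constants.SCAN_COLUMN + "=cf:qualifier";
    Response response = client.get(path, Constants.MIMETYPE_XML);
    System.out.println("scan -> HTTP " + response.getCode());
  }
}
```
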
See the NOTICE file * distributed with this work for additional information @@ -48,7 +47,7 @@ import org.apache.hbase.thirdparty.javax.ws.rs.core.UriInfo; @InterfaceAudience.Private -public class TableScanResource extends ResourceBase { +public class TableScanResource extends ResourceBase { private static final Logger LOG = LoggerFactory.getLogger(TableScanResource.class); TableResource tableResource; @@ -108,18 +107,16 @@ public RowModel next() { @GET @Produces({ Constants.MIMETYPE_PROTOBUF, Constants.MIMETYPE_PROTOBUF_IETF }) - public Response getProtobuf( - final @Context UriInfo uriInfo, + public Response getProtobuf(final @Context UriInfo uriInfo, final @HeaderParam("Accept") String contentType) { if (LOG.isTraceEnabled()) { - LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + - MIMETYPE_BINARY); + LOG.trace("GET " + uriInfo.getAbsolutePath() + " as " + MIMETYPE_BINARY); } servlet.getMetrics().incrementRequests(1); try { int fetchSize = this.servlet.getConfiguration().getInt(Constants.SCAN_FETCH_SIZE, 10); - StreamingOutput stream = new ProtobufStreamingOutput(this.results, contentType, - userRequestedLimit, fetchSize); + StreamingOutput stream = + new ProtobufStreamingOutput(this.results, contentType, userRequestedLimit, fetchSize); servlet.getMetrics().incrementSucessfulScanRequests(1); ResponseBuilder response = Response.ok(stream); response.header("content-type", contentType); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java index e12ff9907b86..2868c046d2c2 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/VersionResource.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest; import java.io.IOException; @@ -69,10 +67,9 @@ public VersionResource() throws IOException { * @return a response for a version request */ @GET - @Produces({MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, - MIMETYPE_PROTOBUF_IETF}) - public Response get(final @Context ServletContext context, - final @Context UriInfo uriInfo) { + @Produces({ MIMETYPE_TEXT, MIMETYPE_XML, MIMETYPE_JSON, MIMETYPE_PROTOBUF, + MIMETYPE_PROTOBUF_IETF }) + public Response get(final @Context ServletContext context, final @Context UriInfo uriInfo) { if (LOG.isTraceEnabled()) { LOG.trace("GET " + uriInfo.getAbsolutePath()); } @@ -87,8 +84,7 @@ public Response get(final @Context ServletContext context, * Dispatch to StorageClusterVersionResource */ @Path("cluster") - public StorageClusterVersionResource getClusterVersionResource() - throws IOException { + public StorageClusterVersionResource getClusterVersionResource() throws IOException { return new StorageClusterVersionResource(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java index 7459f8af0ad7..d6814a2fb196 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Client.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.io.BufferedInputStream; @@ -68,8 +66,8 @@ import org.slf4j.LoggerFactory; /** - * A wrapper around HttpClient which provides some useful function and - * semantics for interacting with the REST gateway. + * A wrapper around HttpClient which provides some useful function and semantics for interacting + * with the REST gateway. */ @InterfaceAudience.Public public class Client { @@ -111,11 +109,10 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, Constants.DEFAULT_REST_CLIENT_CONN_TIMEOUT); int socketTimeout = this.conf.getInt(Constants.REST_CLIENT_SOCKET_TIMEOUT, Constants.DEFAULT_REST_CLIENT_SOCKET_TIMEOUT); - RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(connTimeout) - .setSocketTimeout(socketTimeout) - .setNormalizeUri(false) // URIs should not be normalized, see HBASE-26903 - .build(); + RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(connTimeout) + .setSocketTimeout(socketTimeout).setNormalizeUri(false) // URIs should not be normalized, + // see HBASE-26903 + .build(); httpClientBuilder.setDefaultRequestConfig(requestConfig); // Since HBASE-25267 we don't use the deprecated DefaultHttpClient anymore. @@ -124,10 +121,10 @@ private void initialize(Cluster cluster, Configuration conf, boolean sslEnabled, // automatic content compression. 
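
Client.initialize above reads REST_CLIENT_CONN_TIMEOUT and REST_CLIENT_SOCKET_TIMEOUT from the Configuration and applies them to the RequestConfig, with URI normalization disabled per HBASE-26903. A sketch of supplying those values when constructing a non-SSL client via the Client(Cluster, Configuration, boolean) constructor shown in this hunk; the timeout values and gateway address are arbitrary placeholders:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;

public class TimeoutConfiguredClient {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Arbitrary example values, in milliseconds.
    conf.setInt(Constants.REST_CLIENT_CONN_TIMEOUT, 5_000);
    conf.setInt(Constants.REST_CLIENT_SOCKET_TIMEOUT, 30_000);
    Cluster cluster = new Cluster().add("localhost", 8080); // placeholder gateway
    // Third argument disables SSL; the trust-store constructors below cover HTTPS.
    Client client = new Client(cluster, conf, false);
    System.out.println(cluster);
  }
}
```
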
httpClientBuilder.disableContentCompression(); - if(sslEnabled && trustStore.isPresent()) { + if (sslEnabled && trustStore.isPresent()) { try { SSLContext sslcontext = - SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); + SSLContexts.custom().loadTrustMaterial(trustStore.get(), null).build(); httpClientBuilder.setSSLContext(sslcontext); } catch (NoSuchAlgorithmException | KeyStoreException | KeyManagementException e) { throw new ClientTrustStoreInitializationException("Error while processing truststore", e); @@ -166,12 +163,10 @@ public Client(Cluster cluster, Configuration conf, boolean sslEnabled) { /** * Constructor, allowing to define custom trust store (only for SSL connections) - * * @param cluster the cluster definition * @param trustStorePath custom trust store to use for SSL connections * @param trustStorePassword password to use for custom trust store * @param trustStoreType type of custom trust store - * * @throws ClientTrustStoreInitializationException if the trust store file can not be loaded */ public Client(Cluster cluster, String trustStorePath, Optional trustStorePassword, @@ -181,7 +176,6 @@ public Client(Cluster cluster, String trustStorePath, Optional trustStor /** * Constructor, allowing to define custom trust store (only for SSL connections) - * * @param cluster the cluster definition * @param conf Configuration * @param trustStorePath custom trust store to use for SSL connections @@ -201,12 +195,12 @@ public Client(Cluster cluster, Configuration conf, String trustStorePath, } catch (KeyStoreException e) { throw new ClientTrustStoreInitializationException("Invalid trust store type: " + type, e); } - try (InputStream inputStream = new BufferedInputStream( - Files.newInputStream(new File(trustStorePath).toPath()))) { + try (InputStream inputStream = + new BufferedInputStream(Files.newInputStream(new File(trustStorePath).toPath()))) { trustStore.load(inputStream, password); } catch (CertificateException | NoSuchAlgorithmException | IOException e) { throw new ClientTrustStoreInitializationException("Trust store load error: " + trustStorePath, - e); + e); } initialize(cluster, conf, true, Optional.of(trustStore)); @@ -226,9 +220,8 @@ public HttpClient getHttpClient() { } /** - * Add extra headers. These extra headers will be applied to all http - * methods before they are removed. If any header is not used any more, - * client needs to remove it explicitly. + * Add extra headers. These extra headers will be applied to all http methods before they are + * removed. If any header is not used any more, client needs to remove it explicitly. */ public void addExtraHeader(final String name, final String value) { extraHeaders.put(name, value); @@ -256,11 +249,10 @@ public void removeExtraHeader(final String name) { } /** - * Execute a transaction method given only the path. Will select at random - * one of the members of the supplied cluster definition and iterate through - * the list until a transaction can be successfully completed. The - * definition of success here is a complete HTTP transaction, irrespective - * of result code. + * Execute a transaction method given only the path. Will select at random one of the members of + * the supplied cluster definition and iterate through the list until a transaction can be + * successfully completed. The definition of success here is a complete HTTP transaction, + * irrespective of result code. 
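
The extra-header mechanism documented above applies a header to every subsequent request until it is explicitly removed. A small sketch; the header name and value are arbitrary, and the "/version/cluster" path and Client(Cluster) constructor are assumptions carried over from the earlier examples:

```java
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class ExtraHeaderExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // placeholder gateway
    client.addExtraHeader("X-Request-Tag", "audit-123"); // arbitrary example header
    try {
      Response tagged = client.get("/version/cluster", Constants.MIMETYPE_TEXT); // header sent
      System.out.println("tagged   -> HTTP " + tagged.getCode());
    } finally {
      client.removeExtraHeader("X-Request-Tag"); // stop sending it
    }
    Response untagged = client.get("/version/cluster", Constants.MIMETYPE_TEXT); // header gone
    System.out.println("untagged -> HTTP " + untagged.getCode());
  }
}
```
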
* @param cluster the cluster definition * @param method the transaction method * @param headers HTTP header values to send @@ -268,13 +260,13 @@ public void removeExtraHeader(final String name) { * @return the HTTP response code * @throws IOException */ - public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, - Header[] headers, String path) throws IOException { + public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, Header[] headers, + String path) throws IOException { IOException lastException; if (cluster.nodes.size() < 1) { throw new IOException("Cluster is empty"); } - int start = (int)Math.round((cluster.nodes.size() - 1) * Math.random()); + int start = (int) Math.round((cluster.nodes.size() - 1) * Math.random()); int i = start; do { cluster.lastHost = cluster.nodes.get(i); @@ -326,11 +318,11 @@ public HttpResponse executePathOnly(Cluster cluster, HttpUriRequest method, public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String uri) throws IOException { // method.setURI(new URI(uri, true)); - for (Map.Entry e: extraHeaders.entrySet()) { + for (Map.Entry e : extraHeaders.entrySet()) { method.addHeader(e.getKey(), e.getValue()); } if (headers != null) { - for (Header header: headers) { + for (Header header : headers) { method.addHeader(header); } } @@ -346,16 +338,16 @@ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String u long endTime = EnvironmentEdgeManager.currentTime(); if (LOG.isTraceEnabled()) { - LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + - resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); + LOG.trace(method.getMethod() + " " + uri + " " + resp.getStatusLine().getStatusCode() + " " + + resp.getStatusLine().getReasonPhrase() + " in " + (endTime - startTime) + " ms"); } return resp; } /** - * Execute a transaction method. Will call either executePathOnly - * or executeURI depending on whether a path only is supplied in - * 'path', or if a complete URI is passed instead, respectively. + * Execute a transaction method. Will call either executePathOnly or executeURI + * depending on whether a path only is supplied in 'path', or if a complete URI is passed instead, + * respectively. 
* @param cluster the cluster definition * @param method the HTTP method * @param headers HTTP header values to send @@ -363,8 +355,8 @@ public HttpResponse executeURI(HttpUriRequest method, Header[] headers, String u * @return the HTTP response code * @throws IOException */ - public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, - String path) throws IOException { + public HttpResponse execute(Cluster cluster, HttpUriRequest method, Header[] headers, String path) + throws IOException { if (path.startsWith("/")) { return executePathOnly(cluster, method, headers, path); } @@ -437,8 +429,7 @@ public Response head(String path) throws IOException { * @return a Response object with response detail * @throws IOException */ - public Response head(Cluster cluster, String path, Header[] headers) - throws IOException { + public Response head(Cluster cluster, String path, Header[] headers) throws IOException { HttpHead method = new HttpHead(path); try { HttpResponse resp = execute(cluster, method, null, path); @@ -488,8 +479,7 @@ public Response get(String path, String accept) throws IOException { * @return a Response object with response detail * @throws IOException */ - public Response get(Cluster cluster, String path, String accept) - throws IOException { + public Response get(Cluster cluster, String path, String accept) throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Accept", accept); return get(cluster, path, headers); @@ -498,8 +488,7 @@ public Response get(Cluster cluster, String path, String accept) /** * Send a GET request * @param path the path or URI - * @param headers the HTTP headers to include in the request, - * Accept must be supplied + * @param headers the HTTP headers to include in the request, Accept must be supplied * @return a Response object with response detail * @throws IOException */ @@ -508,32 +497,28 @@ public Response get(String path, Header[] headers) throws IOException { } /** - * Returns the response body of the HTTPResponse, if any, as an array of bytes. - * If response body is not available or cannot be read, returns null - * - * Note: This will cause the entire response body to be buffered in memory. A - * malicious server may easily exhaust all the VM memory. It is strongly - * recommended, to use getResponseAsStream if the content length of the response - * is unknown or reasonably large. - * + * Returns the response body of the HTTPResponse, if any, as an array of bytes. If response body + * is not available or cannot be read, returns null Note: This will cause the entire + * response body to be buffered in memory. A malicious server may easily exhaust all the VM + * memory. It is strongly recommended, to use getResponseAsStream if the content length of the + * response is unknown or reasonably large. * @param resp HttpResponse * @return The response body, null if body is empty - * @throws IOException If an I/O (transport) problem occurs while obtaining the - * response body. + * @throws IOException If an I/O (transport) problem occurs while obtaining the response body. 
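
The getResponseBody javadoc above warns that buffering the whole entity in memory is risky for large or unbounded responses; the Response constructor that carries an InputStream (and Response#getStream()) is the streaming alternative. A hedged sketch that consumes the body incrementally; the scan path and gateway address are placeholders:

```java
import java.io.InputStream;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class StreamedGet {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // placeholder gateway
    String path = "/example_table/*?" + Constants.SCAN_LIMIT + "=1000"; // placeholder scan
    Response response = client.get(path, Constants.MIMETYPE_XML);
    if (response.getCode() == 200) {
      // Read incrementally instead of materializing the whole body as a byte[].
      try (InputStream in = response.getStream()) {
        byte[] buffer = new byte[4096];
        long total = 0;
        int len;
        while ((len = in.read(buffer)) > 0) {
          total += len;
        }
        System.out.println("streamed " + total + " bytes");
      }
    }
  }
}
```
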
*/ - @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = - "NP_LOAD_OF_KNOWN_NULL_VALUE", justification = "null is possible return value") + @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "NP_LOAD_OF_KNOWN_NULL_VALUE", + justification = "null is possible return value") public static byte[] getResponseBody(HttpResponse resp) throws IOException { if (resp.getEntity() == null) return null; try (InputStream instream = resp.getEntity().getContent()) { if (instream != null) { long contentLength = resp.getEntity().getContentLength(); if (contentLength > Integer.MAX_VALUE) { - //guard integer cast from overflow - throw new IOException("Content too large to be buffered: " + contentLength +" bytes"); + // guard integer cast from overflow + throw new IOException("Content too large to be buffered: " + contentLength + " bytes"); } - ByteArrayOutputStream outstream = new ByteArrayOutputStream( - contentLength > 0 ? (int) contentLength : 4*1024); + ByteArrayOutputStream outstream = + new ByteArrayOutputStream(contentLength > 0 ? (int) contentLength : 4 * 1024); byte[] buffer = new byte[4096]; int len; while ((len = instream.read(buffer)) > 0) { @@ -554,15 +539,14 @@ public static byte[] getResponseBody(HttpResponse resp) throws IOException { * @return a Response object with response detail * @throws IOException */ - public Response get(Cluster c, String path, Header[] headers) - throws IOException { + public Response get(Cluster c, String path, Header[] headers) throws IOException { if (httpGet != null) { httpGet.releaseConnection(); } httpGet = new HttpGet(path); HttpResponse resp = execute(c, httpGet, headers, path); - return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), - resp, resp.getEntity() == null ? null : resp.getEntity().getContent()); + return new Response(resp.getStatusLine().getStatusCode(), resp.getAllHeaders(), resp, + resp.getEntity() == null ? null : resp.getEntity().getContent()); } /** @@ -573,8 +557,7 @@ public Response get(Cluster c, String path, Header[] headers) * @return a Response object with response detail * @throws IOException */ - public Response put(String path, String contentType, byte[] content) - throws IOException { + public Response put(String path, String contentType, byte[] content) throws IOException { return put(cluster, path, contentType, content); } @@ -601,8 +584,8 @@ public Response put(String path, String contentType, byte[] content, Header extr * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return put(cluster, path, headers, content); @@ -618,8 +601,8 @@ public Response put(Cluster cluster, String path, String contentType, * @return a Response object with response detail * @throws IOException for error */ - public Response put(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response put(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 
1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -632,14 +615,12 @@ public Response put(Cluster cluster, String path, String contentType, /** * Send a PUT request * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response put(String path, Header[] headers, byte[] content) - throws IOException { + public Response put(String path, Header[] headers, byte[] content) throws IOException { return put(cluster, path, headers, content); } @@ -647,14 +628,13 @@ public Response put(String path, Header[] headers, byte[] content) * Send a PUT request * @param cluster the cluster definition * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response put(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response put(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPut method = new HttpPut(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -675,8 +655,7 @@ public Response put(Cluster cluster, String path, Header[] headers, * @return a Response object with response detail * @throws IOException */ - public Response post(String path, String contentType, byte[] content) - throws IOException { + public Response post(String path, String contentType, byte[] content) throws IOException { return post(cluster, path, contentType, content); } @@ -703,8 +682,8 @@ public Response post(String path, String contentType, byte[] content, Header ext * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content) + throws IOException { Header[] headers = new Header[1]; headers[0] = new BasicHeader("Content-Type", contentType); return post(cluster, path, headers, content); @@ -720,8 +699,8 @@ public Response post(Cluster cluster, String path, String contentType, * @return a Response object with response detail * @throws IOException for error */ - public Response post(Cluster cluster, String path, String contentType, - byte[] content, Header extraHdr) throws IOException { + public Response post(Cluster cluster, String path, String contentType, byte[] content, + Header extraHdr) throws IOException { int cnt = extraHdr == null ? 
1 : 2; Header[] headers = new Header[cnt]; headers[0] = new BasicHeader("Content-Type", contentType); @@ -734,14 +713,12 @@ public Response post(Cluster cluster, String path, String contentType, /** * Send a POST request * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response post(String path, Header[] headers, byte[] content) - throws IOException { + public Response post(String path, Header[] headers, byte[] content) throws IOException { return post(cluster, path, headers, content); } @@ -749,14 +726,13 @@ public Response post(String path, Header[] headers, byte[] content) * Send a POST request * @param cluster the cluster definition * @param path the path or URI - * @param headers the HTTP headers to include, Content-Type must be - * supplied + * @param headers the HTTP headers to include, Content-Type must be supplied * @param content the content bytes * @return a Response object with response detail * @throws IOException */ - public Response post(Cluster cluster, String path, Header[] headers, - byte[] content) throws IOException { + public Response post(Cluster cluster, String path, Header[] headers, byte[] content) + throws IOException { HttpPost method = new HttpPost(path); try { method.setEntity(new InputStreamEntity(new ByteArrayInputStream(content), content.length)); @@ -829,7 +805,6 @@ public Response delete(Cluster cluster, String path, Header extraHdr) throws IOE } } - public static class ClientTrustStoreInitializationException extends RuntimeException { public ClientTrustStoreInitializationException(String message, Throwable cause) { diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java index 008470826dea..dbb30adbc74b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Cluster.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,29 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.client; import java.util.ArrayList; import java.util.Collections; import java.util.List; - import org.apache.yetus.audience.InterfaceAudience; /** - * A list of 'host:port' addresses of HTTP servers operating as a single - * entity, for example multiple redundant web service gateways. + * A list of 'host:port' addresses of HTTP servers operating as a single entity, for example + * multiple redundant web service gateways. 
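
executePathOnly above starts at a random member of the Cluster and walks the node list until one HTTP transaction completes, so listing several redundant gateways gives simple client-side failover for path-only requests. A sketch, again assuming Cluster#add(host, port) complements the remove(host, port) shown in this hunk; the hostnames are placeholders:

```java
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class RedundantGateways {
  public static void main(String[] args) throws Exception {
    // Placeholder gateway addresses acting as a single logical endpoint.
    Cluster cluster = new Cluster()
        .add("rest-gw-1.example.com", 8080)
        .add("rest-gw-2.example.com", 8080)
        .add("rest-gw-3.example.com", 8080);
    Client client = new Client(cluster);
    // A path-only request may be served by whichever node completes the transaction first.
    Response response = client.get("/version/cluster", Constants.MIMETYPE_TEXT);
    System.out.println(cluster + " -> HTTP " + response.getCode());
  }
}
```
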
*/ @InterfaceAudience.Public public class Cluster { - protected List nodes = - Collections.synchronizedList(new ArrayList()); + protected List nodes = Collections.synchronizedList(new ArrayList()); protected String lastHost; /** * Constructor */ - public Cluster() {} + public Cluster() { + } /** * Constructor @@ -99,10 +96,8 @@ public Cluster remove(String name, int port) { return remove(sb.toString()); } - @Override public String toString() { - return "Cluster{" + - "nodes=" + nodes + - ", lastHost='" + lastHost + '\'' + - '}'; + @Override + public String toString() { + return "Cluster{" + "nodes=" + nodes + ", lastHost='" + lastHost + '\'' + '}'; } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java index 0e91005ab2b8..fbffa71294b8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/Response.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,12 +19,9 @@ import java.io.IOException; import java.io.InputStream; - import org.apache.http.Header; import org.apache.http.HttpResponse; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -70,10 +66,9 @@ public Response(int code, Header[] headers, byte[] body) { this.headers = headers; this.body = body; } - + /** * Constructor. Note: this is not thread-safe - * * @param code the HTTP response code * @param headers headers the HTTP response headers * @param resp the response @@ -93,13 +88,12 @@ public Response(int code, Header[] headers, HttpResponse resp, InputStream in) { public int getCode() { return code; } - + /** * Gets the input stream instance. - * * @return an instance of InputStream class. */ - public InputStream getStream(){ + public InputStream getStream() { return this.stream; } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java index b9b8a006437c..d9dbd8707362 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/AuthFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -7,7 +7,7 @@ * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -44,18 +44,16 @@ public class AuthFilter extends AuthenticationFilter { private static final int REST_PREFIX_LEN = REST_PREFIX.length(); /** - * Returns the configuration to be used by the authentication filter - * to initialize the authentication handler. - * - * This filter retrieves all HBase configurations and passes those started - * with REST_PREFIX to the authentication handler. It is useful to support - * plugging different authentication handlers. 
- */ + * Returns the configuration to be used by the authentication filter to initialize the + * authentication handler. This filter retrieves all HBase configurations and passes those started + * with REST_PREFIX to the authentication handler. It is useful to support plugging different + * authentication handlers. + */ @Override - protected Properties getConfiguration( - String configPrefix, FilterConfig filterConfig) throws ServletException { + protected Properties getConfiguration(String configPrefix, FilterConfig filterConfig) + throws ServletException { Properties props = super.getConfiguration(configPrefix, filterConfig); - //setting the cookie path to root '/' so it is used for all resources. + // setting the cookie path to root '/' so it is used for all resources. props.setProperty(AuthenticationFilter.COOKIE_PATH, "/"); Configuration conf = null; @@ -70,11 +68,10 @@ protected Properties getConfiguration( String name = entry.getKey(); if (name.startsWith(REST_PREFIX)) { String value = entry.getValue(); - if(name.equals(REST_AUTHENTICATION_PRINCIPAL)) { + if (name.equals(REST_AUTHENTICATION_PRINCIPAL)) { try { - String machineName = Strings.domainNamePointerToHostName( - DNS.getDefaultHost(conf.get(REST_DNS_INTERFACE, "default"), - conf.get(REST_DNS_NAMESERVER, "default"))); + String machineName = Strings.domainNamePointerToHostName(DNS.getDefaultHost( + conf.get(REST_DNS_INTERFACE, "default"), conf.get(REST_DNS_NAMESERVER, "default"))); value = SecurityUtil.getServerPrincipal(value, machineName); } catch (IOException ie) { throw new ServletException("Failed to retrieve server principal", ie); diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java index f74e10cae74b..efb7e2a227aa 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ import java.io.IOException; import java.util.zip.GZIPInputStream; - import javax.servlet.ReadListener; import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java index 51eba665f3fd..db41fbb5b847 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPRequestWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,17 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; - import javax.servlet.ServletInputStream; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequestWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java index 3fa1ad6f857d..7c1a4f995472 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseStream.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -20,11 +19,9 @@ import java.io.IOException; import java.util.zip.GZIPOutputStream; - import javax.servlet.ServletOutputStream; import javax.servlet.WriteListener; import javax.servlet.http.HttpServletResponse; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java index 53a26ea1ac80..41342214100d 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GZIPResponseWrapper.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,16 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.filter; import java.io.IOException; import java.io.PrintWriter; - import javax.servlet.ServletOutputStream; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpServletResponseWrapper; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.Private @@ -80,7 +76,7 @@ public void flushBuffer() throws IOException { writer.flush(); } if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } else { getResponse().flushBuffer(); } @@ -90,7 +86,7 @@ public void flushBuffer() throws IOException { public void reset() { super.reset(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -101,7 +97,7 @@ public void reset() { public void resetBuffer() { super.resetBuffer(); if (os != null && (os instanceof GZIPResponseStream)) { - ((GZIPResponseStream)os).resetBuffer(); + ((GZIPResponseStream) os).resetBuffer(); } writer = null; os = null; @@ -129,7 +125,7 @@ public void sendRedirect(String location) throws IOException { public ServletOutputStream getOutputStream() throws IOException { if (os == null) { if (!response.isCommitted() && compress) { - os = (ServletOutputStream)new GZIPResponseStream(response); + os = (ServletOutputStream) new GZIPResponseStream(response); } else { os = response.getOutputStream(); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java index 4ba9eca302d0..9aecef5881e0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/GzipFilter.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -24,7 +23,6 @@ import java.util.Locale; import java.util.Set; import java.util.StringTokenizer; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -33,9 +31,7 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.hbase.HBaseInterfaceAudience; - import org.apache.yetus.audience.InterfaceAudience; @InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG) @@ -58,27 +54,25 @@ public void destroy() { } @Override - public void doFilter(ServletRequest req, ServletResponse rsp, - FilterChain chain) throws IOException, ServletException { - HttpServletRequest request = (HttpServletRequest)req; - HttpServletResponse response = (HttpServletResponse)rsp; + public void doFilter(ServletRequest req, ServletResponse rsp, FilterChain chain) + throws IOException, ServletException { + HttpServletRequest request = (HttpServletRequest) req; + HttpServletResponse response = (HttpServletResponse) rsp; String contentEncoding = request.getHeader("content-encoding"); String acceptEncoding = request.getHeader("accept-encoding"); String contentType = request.getHeader("content-type"); - if ((contentEncoding != null) && - (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { + if ((contentEncoding != null) && (contentEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) { request = new GZIPRequestWrapper(request); } - if (((acceptEncoding != null) && - (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) || - ((contentType != null) && mimeTypes.contains(contentType))) { + if (((acceptEncoding != null) && (acceptEncoding.toLowerCase(Locale.ROOT).contains("gzip"))) + || ((contentType != null) && mimeTypes.contains(contentType))) { response = new GZIPResponseWrapper(response); } chain.doFilter(request, response); if (response instanceof GZIPResponseWrapper) { OutputStream os = response.getOutputStream(); if (os instanceof GZIPResponseStream) { - ((GZIPResponseStream)os).finish(); + ((GZIPResponseStream) os).finish(); } } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java index 94eb314e01ab..9d0894f468d0 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/filter/RestCsrfPreventionFilter.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,7 +25,6 @@ import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; - import javax.servlet.Filter; import javax.servlet.FilterChain; import javax.servlet.FilterConfig; @@ -34,36 +33,29 @@ import javax.servlet.ServletResponse; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; - import org.apache.hadoop.conf.Configuration; - import org.apache.yetus.audience.InterfaceAudience; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** - * This filter provides protection against cross site request forgery (CSRF) - * attacks for REST APIs. 
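
GzipFilter.doFilter above wraps the response whenever Accept-Encoding asks for gzip (and wraps the request when Content-Encoding declares it), while the REST Client explicitly disables HttpClient's automatic decompression. A hedged sketch of requesting a compressed response and unwrapping it manually; it assumes the gateway actually has the filter installed, and the row path, table name, and gateway address are placeholders:

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.util.zip.GZIPInputStream;
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;

public class GzipAcceptExample {
  public static void main(String[] args) throws Exception {
    Client client = new Client(new Cluster().add("localhost", 8080)); // placeholder gateway
    // Ask the server-side GzipFilter to compress the response.
    client.addExtraHeader("Accept-Encoding", "gzip");
    try {
      Response response = client.get("/example_table/row-1", Constants.MIMETYPE_XML); // placeholder
      if (response.getCode() == 200) {
        // The Client builder disables automatic decompression, so unwrap the entity explicitly.
        try (BufferedReader reader = new BufferedReader(
            new InputStreamReader(new GZIPInputStream(response.getStream()), "UTF-8"))) {
          System.out.println(reader.readLine());
        }
      }
    } finally {
      client.removeExtraHeader("Accept-Encoding");
    }
  }
}
```
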
Enabling this filter on an endpoint results in the - * requirement of all client to send a particular (configurable) HTTP header - * with every request. In the absense of this header the filter will reject the - * attempt as a bad request. + * This filter provides protection against cross site request forgery (CSRF) attacks for REST APIs. + * Enabling this filter on an endpoint results in the requirement of all client to send a particular + * (configurable) HTTP header with every request. In the absense of this header the filter will + * reject the attempt as a bad request. */ @InterfaceAudience.Public public class RestCsrfPreventionFilter implements Filter { - private static final Logger LOG = - LoggerFactory.getLogger(RestCsrfPreventionFilter.class); + private static final Logger LOG = LoggerFactory.getLogger(RestCsrfPreventionFilter.class); public static final String HEADER_USER_AGENT = "User-Agent"; - public static final String BROWSER_USER_AGENT_PARAM = - "browser-useragents-regex"; + public static final String BROWSER_USER_AGENT_PARAM = "browser-useragents-regex"; public static final String CUSTOM_HEADER_PARAM = "custom-header"; - public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = - "methods-to-ignore"; - static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; + public static final String CUSTOM_METHODS_TO_IGNORE_PARAM = "methods-to-ignore"; + static final String BROWSER_USER_AGENTS_DEFAULT = "^Mozilla.*,^Opera.*"; public static final String HEADER_DEFAULT = "X-XSRF-HEADER"; - static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; - private String headerName = HEADER_DEFAULT; + static final String METHODS_TO_IGNORE_DEFAULT = "GET,OPTIONS,HEAD,TRACE"; + private String headerName = HEADER_DEFAULT; private Set methodsToIgnore = null; private Set browserUserAgents; @@ -73,8 +65,7 @@ public void init(FilterConfig filterConfig) { if (customHeader != null) { headerName = customHeader; } - String customMethodsToIgnore = - filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); + String customMethodsToIgnore = filterConfig.getInitParameter(CUSTOM_METHODS_TO_IGNORE_PARAM); if (customMethodsToIgnore != null) { parseMethodsToIgnore(customMethodsToIgnore); } else { @@ -86,13 +77,14 @@ public void init(FilterConfig filterConfig) { agents = BROWSER_USER_AGENTS_DEFAULT; } parseBrowserUserAgents(agents); - LOG.info(String.format("Adding cross-site request forgery (CSRF) protection, " - + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", - headerName, methodsToIgnore, browserUserAgents)); + LOG.info(String.format( + "Adding cross-site request forgery (CSRF) protection, " + + "headerName = %s, methodsToIgnore = %s, browserUserAgents = %s", + headerName, methodsToIgnore, browserUserAgents)); } void parseBrowserUserAgents(String userAgents) { - String[] agentsArray = userAgents.split(","); + String[] agentsArray = userAgents.split(","); browserUserAgents = new HashSet<>(); for (String patternString : agentsArray) { browserUserAgents.add(Pattern.compile(patternString)); @@ -106,17 +98,14 @@ void parseMethodsToIgnore(String mti) { } /** - * This method interrogates the User-Agent String and returns whether it - * refers to a browser. If its not a browser, then the requirement for the - * CSRF header will not be enforced; if it is a browser, the requirement will - * be enforced. + * This method interrogates the User-Agent String and returns whether it refers to a browser. 
If + * its not a browser, then the requirement for the CSRF header will not be enforced; if it is a + * browser, the requirement will be enforced. *

    - * A User-Agent String is considered to be a browser if it matches - * any of the regex patterns from browser-useragent-regex; the default - * behavior is to consider everything a browser that matches the following: - * "^Mozilla.*,^Opera.*". Subclasses can optionally override - * this method to use different behavior. - * + * A User-Agent String is considered to be a browser if it matches any of the regex patterns from + * browser-useragent-regex; the default behavior is to consider everything a browser that matches + * the following: "^Mozilla.*,^Opera.*". Subclasses can optionally override this method to use + * different behavior. * @param userAgent The User-Agent String, or null if there isn't one * @return true if the User-Agent String refers to a browser, false if not */ @@ -134,18 +123,16 @@ protected boolean isBrowser(String userAgent) { } /** - * Defines the minimal API requirements for the filter to execute its - * filtering logic. This interface exists to facilitate integration in - * components that do not run within a servlet container and therefore cannot - * rely on a servlet container to dispatch to the {@link #doFilter} method. - * Applications that do run inside a servlet container will not need to write - * code that uses this interface. Instead, they can use typical servlet - * container configuration mechanisms to insert the filter. + * Defines the minimal API requirements for the filter to execute its filtering logic. This + * interface exists to facilitate integration in components that do not run within a servlet + * container and therefore cannot rely on a servlet container to dispatch to the {@link #doFilter} + * method. Applications that do run inside a servlet container will not need to write code that + * uses this interface. Instead, they can use typical servlet container configuration mechanisms + * to insert the filter. */ public interface HttpInteraction { /** * Returns the value of a header. - * * @param header name of header * @return value of header */ @@ -153,24 +140,21 @@ public interface HttpInteraction { /** * Returns the method. - * * @return method */ String getMethod(); /** * Called by the filter after it decides that the request may proceed. - * * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws ServletException if the implementation relies on the servlet API and a servlet API + * call has failed */ void proceed() throws IOException, ServletException; /** - * Called by the filter after it decides that the request is a potential - * CSRF attack and therefore must be rejected. - * + * Called by the filter after it decides that the request is a potential CSRF attack and + * therefore must be rejected. * @param code status code to send * @param message response message * @throws IOException if there is an I/O error @@ -180,31 +164,29 @@ public interface HttpInteraction { /** * Handles an {@link HttpInteraction} by applying the filtering logic. 
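
The HttpInteraction interface above exists so handleHttpInteraction can run outside a servlet container: the filter only needs a header lookup, the method name, and the proceed/sendError callbacks. A hedged sketch with hypothetical FakeInteraction and EmptyFilterConfig classes (neither is part of HBase) that exercises the default decision logic in memory:

```java
import java.io.IOException;
import java.util.Collections;
import java.util.Enumeration;
import javax.servlet.FilterConfig;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter.HttpInteraction;

public class CsrfDecisionDemo {

  /** Hypothetical HttpInteraction backed by plain fields instead of a servlet request. */
  static final class FakeInteraction implements HttpInteraction {
    private final String method;
    private final String userAgent;
    private final String csrfHeader;

    FakeInteraction(String method, String userAgent, String csrfHeader) {
      this.method = method;
      this.userAgent = userAgent;
      this.csrfHeader = csrfHeader;
    }

    @Override public String getHeader(String name) {
      // The filter asks for the User-Agent and for the configured custom header.
      return RestCsrfPreventionFilter.HEADER_USER_AGENT.equals(name) ? userAgent : csrfHeader;
    }

    @Override public String getMethod() {
      return method;
    }

    @Override public void proceed() {
      System.out.println(method + " allowed");
    }

    @Override public void sendError(int code, String message) {
      System.out.println(method + " rejected: " + code + " " + message);
    }
  }

  /** Hypothetical FilterConfig with no init parameters, so the filter falls back to its defaults. */
  static final class EmptyFilterConfig implements FilterConfig {
    @Override public String getFilterName() { return "rest-csrf"; }
    @Override public ServletContext getServletContext() { return null; }
    @Override public String getInitParameter(String name) { return null; }
    @Override public Enumeration<String> getInitParameterNames() {
      return Collections.emptyEnumeration();
    }
  }

  public static void main(String[] args) throws IOException, ServletException {
    RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter();
    filter.init(new EmptyFilterConfig()); // defaults: X-XSRF-HEADER; GET/OPTIONS/HEAD/TRACE ignored
    // A browser PUT without the header is rejected; the same PUT carrying the header proceeds.
    filter.handleHttpInteraction(new FakeInteraction("PUT", "Mozilla/5.0", null));
    filter.handleHttpInteraction(new FakeInteraction("PUT", "Mozilla/5.0", "any-value"));
  }
}
```
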
- * * @param httpInteraction caller's HTTP interaction * @throws IOException if there is an I/O error - * @throws ServletException if the implementation relies on the servlet API - * and a servlet API call has failed + * @throws ServletException if the implementation relies on the servlet API and a servlet API call + * has failed */ public void handleHttpInteraction(HttpInteraction httpInteraction) throws IOException, ServletException { - if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) || - methodsToIgnore.contains(httpInteraction.getMethod()) || - httpInteraction.getHeader(headerName) != null) { + if (!isBrowser(httpInteraction.getHeader(HEADER_USER_AGENT)) + || methodsToIgnore.contains(httpInteraction.getMethod()) + || httpInteraction.getHeader(headerName) != null) { httpInteraction.proceed(); } else { httpInteraction.sendError(HttpServletResponse.SC_BAD_REQUEST, - "Missing Required Header for CSRF Vulnerability Protection"); + "Missing Required Header for CSRF Vulnerability Protection"); } } @Override - public void doFilter(ServletRequest request, ServletResponse response, - final FilterChain chain) throws IOException, ServletException { - final HttpServletRequest httpRequest = (HttpServletRequest)request; - final HttpServletResponse httpResponse = (HttpServletResponse)response; - handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, - httpResponse, chain)); + public void doFilter(ServletRequest request, ServletResponse response, final FilterChain chain) + throws IOException, ServletException { + final HttpServletRequest httpRequest = (HttpServletRequest) request; + final HttpServletResponse httpResponse = (HttpServletResponse) response; + handleHttpInteraction(new ServletFilterHttpInteraction(httpRequest, httpResponse, chain)); } @Override @@ -212,15 +194,12 @@ public void destroy() { } /** - * Constructs a mapping of configuration properties to be used for filter - * initialization. The mapping includes all properties that start with the - * specified configuration prefix. Property names in the mapping are trimmed - * to remove the configuration prefix. - * + * Constructs a mapping of configuration properties to be used for filter initialization. The + * mapping includes all properties that start with the specified configuration prefix. Property + * names in the mapping are trimmed to remove the configuration prefix. * @param conf configuration to read * @param confPrefix configuration prefix - * @return mapping of configuration properties to be used for filter - * initialization + * @return mapping of configuration properties to be used for filter initialization */ public static Map getFilterParams(Configuration conf, String confPrefix) { Map filterConfigMap = new HashMap<>(); @@ -245,7 +224,6 @@ private static final class ServletFilterHttpInteraction implements HttpInteracti /** * Creates a new ServletFilterHttpInteraction. 
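
getFilterParams above collects every Configuration property under a given prefix and strips that prefix so the remainder can be handed to the filter as init parameters. A short sketch of the prefix handling; the "hbase.rest.csrf." prefix and the property values are illustrative assumptions, while the parameter names come from the constants in this file:

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.rest.filter.RestCsrfPreventionFilter;

public class CsrfFilterParams {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative prefix and values; the deployment decides the real ones.
    String prefix = "hbase.rest.csrf.";
    conf.set(prefix + RestCsrfPreventionFilter.CUSTOM_HEADER_PARAM, "X-XSRF-HEADER");
    conf.set(prefix + RestCsrfPreventionFilter.CUSTOM_METHODS_TO_IGNORE_PARAM,
      "GET,OPTIONS,HEAD,TRACE");
    Map<String, String> params = RestCsrfPreventionFilter.getFilterParams(conf, prefix);
    // Prefix stripped: the two keys set above come back as "custom-header" and "methods-to-ignore".
    params.forEach((k, v) -> System.out.println(k + " = " + v));
  }
}
```
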
- * * @param httpRequest request to process * @param httpResponse response to process * @param chain filter chain to forward to if HTTP interaction is allowed diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java index ffb6743f5e80..76341f8c9560 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,34 +15,31 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlValue; - -import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.hadoop.hbase.util.ByteStringer; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.CellUtil; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell; +import org.apache.hadoop.hbase.util.ByteStringer; +import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a cell. A cell is a single value associated a column and - * optional qualifier, and either the timestamp when it was stored or the user- - * provided timestamp if one was explicitly supplied. + * Representation of a cell. A cell is a single value associated a column and optional qualifier, + * and either the timestamp when it was stored or the user- provided timestamp if one was explicitly + * supplied. * *

      * <complexType name="Cell">
    @@ -59,7 +55,7 @@
      * </complexType>
      * 
    */ -@XmlRootElement(name="Cell") +@XmlRootElement(name = "Cell") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellModel implements ProtobufMessageHandler, Serializable { @@ -80,7 +76,8 @@ public class CellModel implements ProtobufMessageHandler, Serializable { /** * Default constructor */ - public CellModel() {} + public CellModel() { + } /** * Constructor @@ -106,8 +103,8 @@ public CellModel(byte[] column, byte[] qualifier, byte[] value) { * @param cell */ public CellModel(org.apache.hadoop.hbase.Cell cell) { - this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), CellUtil - .cloneValue(cell)); + this(CellUtil.cloneFamily(cell), CellUtil.cloneQualifier(cell), cell.getTimestamp(), + CellUtil.cloneValue(cell)); } /** @@ -129,8 +126,7 @@ public CellModel(byte[] column, long timestamp, byte[] value) { * @param timestamp * @param value */ - public CellModel(byte[] column, byte[] qualifier, long timestamp, - byte[] value) { + public CellModel(byte[] column, byte[] qualifier, long timestamp, byte[] value) { this.column = CellUtil.makeColumn(column, qualifier); this.timestamp = timestamp; this.value = value; @@ -151,8 +147,7 @@ public void setColumn(byte[] column) { } /** - * @return true if the timestamp property has been specified by the - * user + * @return true if the timestamp property has been specified by the user */ public boolean hasUserTimestamp() { return timestamp != HConstants.LATEST_TIMESTAMP; @@ -198,8 +193,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Cell.Builder builder = Cell.newBuilder(); ProtobufUtil.mergeFrom(builder, message); setColumn(builder.getColumn().toByteArray()); @@ -222,28 +216,18 @@ public boolean equals(Object obj) { return false; } CellModel cellModel = (CellModel) obj; - return new EqualsBuilder(). - append(column, cellModel.column). - append(timestamp, cellModel.timestamp). - append(value, cellModel.value). - isEquals(); + return new EqualsBuilder().append(column, cellModel.column) + .append(timestamp, cellModel.timestamp).append(value, cellModel.value).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(column). - append(timestamp). - append(value). - toHashCode(); + return new HashCodeBuilder().append(column).append(timestamp).append(value).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("column", column). - append("timestamp", timestamp). - append("value", value). - toString(); + return new ToStringBuilder(this).append("column", column).append("timestamp", timestamp) + .append("value", value).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java index 7fbfa0109d16..d962c71d314c 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/CellSetModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
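
CellModel above pairs a column (family:qualifier), an optional timestamp, and a value; CellSetModel, next, groups rows of such cells for transport. A hedged sketch that builds one row and PUTs it as protobuf: the byte[]-key RowModel constructor and the "/table/rowkey" path are assumptions not shown in this patch, and all table, row, family, and gateway names are placeholders:

```java
import org.apache.hadoop.hbase.rest.Constants;
import org.apache.hadoop.hbase.rest.client.Client;
import org.apache.hadoop.hbase.rest.client.Cluster;
import org.apache.hadoop.hbase.rest.client.Response;
import org.apache.hadoop.hbase.rest.model.CellModel;
import org.apache.hadoop.hbase.rest.model.CellSetModel;
import org.apache.hadoop.hbase.rest.model.RowModel;
import org.apache.hadoop.hbase.util.Bytes;

public class PutCellSet {
  public static void main(String[] args) throws Exception {
    RowModel rowModel = new RowModel(Bytes.toBytes("row-1")); // assumed byte[]-key constructor
    // Column only: the server assigns the timestamp.
    rowModel.addCell(new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("v1")));
    // Column plus an explicit user-provided timestamp.
    rowModel.addCell(
      new CellModel(Bytes.toBytes("cf"), Bytes.toBytes("q2"), 1234567890L, Bytes.toBytes("v2")));

    CellSetModel cellSet = new CellSetModel();
    cellSet.addRow(rowModel);

    Client client = new Client(new Cluster().add("localhost", 8080)); // placeholder gateway
    Response response = client.put("/example_table/row-1", Constants.MIMETYPE_PROTOBUF,
      cellSet.createProtobufOutput());
    System.out.println("PUT -> " + response.getCode());
  }
}
```
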
See the NOTICE file * distributed with this work for additional information @@ -22,24 +21,21 @@ import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.CellMessage.Cell; import org.apache.hadoop.hbase.rest.protobuf.generated.CellSetMessage.CellSet; import org.apache.hadoop.hbase.util.ByteStringer; - import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a grouping of cells. May contain cells from more than - * one row. Encapsulates RowModel and CellModel models. + * Representation of a grouping of cells. May contain cells from more than one row. Encapsulates + * RowModel and CellModel models. * *
      * <complexType name="CellSet">
    @@ -70,13 +66,13 @@
      * </complexType>
      * 
    */ -@XmlRootElement(name="CellSet") +@XmlRootElement(name = "CellSet") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class CellSetModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; - @XmlElement(name="Row") + @XmlElement(name = "Row") private List rows; /** @@ -130,8 +126,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { CellSet.Builder builder = CellSet.newBuilder(); ProtobufUtil.mergeFrom(builder, message); for (CellSet.Row row : builder.getRowsList()) { @@ -142,8 +137,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) timestamp = cell.getTimestamp(); } rowModel.addCell( - new CellModel(cell.getColumn().toByteArray(), timestamp, - cell.getData().toByteArray())); + new CellModel(cell.getColumn().toByteArray(), timestamp, cell.getData().toByteArray())); } addRow(rowModel); } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java index 967f6ba2ce34..189fe58c0641 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ColumnSchemaModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,24 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonAnyGetter; +import com.fasterxml.jackson.annotation.JsonAnySetter; import java.io.Serializable; import java.util.LinkedHashMap; import java.util.Map; - import javax.xml.bind.annotation.XmlAnyAttribute; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.namespace.QName; - -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HConstants; - -import com.fasterxml.jackson.annotation.JsonAnyGetter; -import com.fasterxml.jackson.annotation.JsonAnySetter; +import org.apache.yetus.audience.InterfaceAudience; /** * Representation of a column family schema. @@ -45,7 +40,7 @@ * </complexType> * */ -@XmlRootElement(name="ColumnSchema") +@XmlRootElement(name = "ColumnSchema") @InterfaceAudience.Private public class ColumnSchemaModel implements Serializable { private static final long serialVersionUID = 1L; @@ -58,12 +53,13 @@ public class ColumnSchemaModel implements Serializable { private static QName VERSIONS = new QName(HConstants.VERSIONS); private String name; - private Map attrs = new LinkedHashMap<>(); + private Map attrs = new LinkedHashMap<>(); /** * Default constructor */ - public ColumnSchemaModel() {} + public ColumnSchemaModel() { + } /** * Add an attribute to the column family schema @@ -81,7 +77,7 @@ public void addAttribute(String name, Object value) { */ public String getAttribute(String name) { Object o = attrs.get(new QName(name)); - return o != null ? o.toString(): null; + return o != null ? 
o.toString() : null; } /** @@ -97,7 +93,7 @@ public String getName() { */ @XmlAnyAttribute @JsonAnyGetter - public Map getAny() { + public Map getAny() { return attrs; } @@ -108,7 +104,8 @@ public void setName(String name) { this.name = name; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -117,7 +114,7 @@ public String toString() { sb.append("{ NAME => '"); sb.append(name); sb.append('\''); - for (Map.Entry e: attrs.entrySet()) { + for (Map.Entry e : attrs.entrySet()) { sb.append(", "); sb.append(e.getKey().getLocalPart()); sb.append(" => '"); @@ -138,8 +135,7 @@ public String toString() { */ public boolean __getBlockcache() { Object o = attrs.get(BLOCKCACHE); - return o != null ? - Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; + return o != null ? Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKCACHE; } /** @@ -147,8 +143,7 @@ public boolean __getBlockcache() { */ public int __getBlocksize() { Object o = attrs.get(BLOCKSIZE); - return o != null ? - Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; + return o != null ? Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_BLOCKSIZE; } /** @@ -172,8 +167,7 @@ public String __getCompression() { */ public boolean __getInMemory() { Object o = attrs.get(IN_MEMORY); - return o != null ? - Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; + return o != null ? Boolean.parseBoolean(o.toString()) : HColumnDescriptor.DEFAULT_IN_MEMORY; } /** @@ -181,8 +175,7 @@ public boolean __getInMemory() { */ public int __getTTL() { Object o = attrs.get(TTL); - return o != null ? - Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL; + return o != null ? Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_TTL; } /** @@ -190,8 +183,7 @@ public int __getTTL() { */ public int __getVersions() { Object o = attrs.get(VERSIONS); - return o != null ? - Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; + return o != null ? Integer.parseInt(o.toString()) : HColumnDescriptor.DEFAULT_VERSIONS; } /** @@ -216,7 +208,7 @@ public void __setBloomfilter(String value) { * @param value the desired value of the COMPRESSION attribute */ public void __setCompression(String value) { - attrs.put(COMPRESSION, value); + attrs.put(COMPRESSION, value); } /** diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java index af3b0b067a43..ac2f053621ff 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesInstanceModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
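// Aside, not part of the patch: an illustrative sketch of the ColumnSchemaModel API touched
// above, using only members shown in this diff (setName, addAttribute, __getVersions, toString).
import org.apache.hadoop.hbase.rest.model.ColumnSchemaModel;

public class ColumnSchemaModelSketch {
  public static void main(String[] args) {
    ColumnSchemaModel family = new ColumnSchemaModel();
    family.setName("cf");
    family.addAttribute("VERSIONS", 3);
    // __getVersions() parses the attribute value, falling back to the HColumnDescriptor
    // default when the attribute has not been set
    System.out.println(family.__getVersions());
    // prints something like { NAME => 'cf', VERSIONS => '3' ... }
    System.out.println(family.toString());
  }
}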
*/ - package org.apache.hadoop.hbase.rest.model; import java.io.IOException; @@ -24,18 +22,15 @@ import java.util.HashMap; import java.util.List; import java.util.Map; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlRootElement; import javax.xml.bind.annotation.XmlTransient; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; -import org.apache.hadoop.hbase.rest.protobuf - .generated.NamespacePropertiesMessage.NamespaceProperties; +import org.apache.hadoop.hbase.rest.protobuf.generated.NamespacePropertiesMessage.NamespaceProperties; +import org.apache.yetus.audience.InterfaceAudience; /** * List a HBase namespace's key/value properties. @@ -47,7 +42,7 @@ *
  • value: property value
  • * */ -@XmlRootElement(name="NamespaceProperties") +@XmlRootElement(name = "NamespaceProperties") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesInstanceModel implements Serializable, ProtobufMessageHandler { @@ -55,7 +50,7 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan private static final long serialVersionUID = 1L; // JAX-RS automatically converts Map to XMLAnyElement. - private Map properties = null; + private Map properties = null; @XmlTransient private String namespaceName; @@ -63,7 +58,8 @@ public class NamespacesInstanceModel implements Serializable, ProtobufMessageHan /** * Default constructor. Do not use. */ - public NamespacesInstanceModel() {} + public NamespacesInstanceModel() { + } /** * Constructor to use if namespace does not exist in HBASE. @@ -82,12 +78,16 @@ public NamespacesInstanceModel(String namespaceName) throws IOException { */ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOException { this.namespaceName = namespaceName; - if(admin == null) { return; } + if (admin == null) { + return; + } NamespaceDescriptor nd = admin.getNamespaceDescriptor(namespaceName); // For properly formed JSON, if no properties, field has to be null (not just no elements). - if(nd.getConfiguration().isEmpty()){ return; } + if (nd.getConfiguration().isEmpty()) { + return; + } properties = new HashMap<>(); properties.putAll(nd.getConfiguration()); @@ -99,7 +99,7 @@ public NamespacesInstanceModel(Admin admin, String namespaceName) throws IOExcep * @param value attribute value */ public void addProperty(String key, String value) { - if(properties == null){ + if (properties == null) { properties = new HashMap<>(); } properties.put(key, value); @@ -108,18 +108,19 @@ public void addProperty(String key, String value) { /** * @return The map of uncategorized namespace properties. 
*/ - public Map getProperties() { - if(properties == null){ + public Map getProperties() { + if (properties == null) { properties = new HashMap<>(); } return properties; } - public String getNamespaceName(){ + public String getNamespaceName() { return namespaceName; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override @@ -128,7 +129,7 @@ public String toString() { sb.append("{NAME => \'"); sb.append(namespaceName); sb.append("\'"); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { sb.append(", "); sb.append(entry.getKey()); @@ -144,7 +145,7 @@ public String toString() { @Override public byte[] createProtobufOutput() { NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); - if(properties != null){ + if (properties != null) { for (Map.Entry entry : properties.entrySet()) { String key = entry.getKey(); NamespaceProperties.Property.Builder property = NamespaceProperties.Property.newBuilder(); @@ -161,7 +162,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOExce NamespaceProperties.Builder builder = NamespaceProperties.newBuilder(); builder.mergeFrom(message); List properties = builder.getPropsList(); - for(NamespaceProperties.Property property: properties){ + for (NamespaceProperties.Property property : properties) { addProperty(property.getKey(), property.getValue()); } return this; diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java index c0dfa75355ef..e0e2cc1051b8 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/NamespacesModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,27 +15,22 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; +import com.fasterxml.jackson.annotation.JsonProperty; import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.NamespaceDescriptor; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Admin; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.NamespacesMessage.Namespaces; - -import com.fasterxml.jackson.annotation.JsonProperty; - +import org.apache.yetus.audience.InterfaceAudience; /** * A list of HBase namespaces. @@ -44,7 +38,7 @@ *
  • Namespace: namespace name
  • * */ -@XmlRootElement(name="Namespaces") +@XmlRootElement(name = "Namespaces") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class NamespacesModel implements Serializable, ProtobufMessageHandler { @@ -52,13 +46,14 @@ public class NamespacesModel implements Serializable, ProtobufMessageHandler { private static final long serialVersionUID = 1L; @JsonProperty("Namespace") - @XmlElement(name="Namespace") + @XmlElement(name = "Namespace") private List namespaces = new ArrayList<>(); /** * Default constructor. Do not use. */ - public NamespacesModel() {} + public NamespacesModel() { + } /** * Constructor @@ -87,7 +82,8 @@ public void setNamespaces(List namespaces) { this.namespaces = namespaces; } - /* (non-Javadoc) + /* + * (non-Javadoc) * @see java.lang.Object#toString() */ @Override diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java index b560f697dead..c8baf2e3467b 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/RowModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,22 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.commons.lang3.builder.ToStringBuilder; @@ -40,9 +35,8 @@ import org.apache.yetus.audience.InterfaceAudience; /** - * Representation of a row. A row is a related set of cells, grouped by common - * row key. RowModels do not appear in results by themselves. They are always - * encapsulated within CellSetModels. + * Representation of a row. A row is a related set of cells, grouped by common row key. RowModels do + * not appear in results by themselves. They are always encapsulated within CellSetModels. * *
      * <complexType name="Row">
    @@ -54,7 +48,7 @@
      * </complexType>
      * 
    */ -@XmlRootElement(name="Row") +@XmlRootElement(name = "Row") @XmlAccessorType(XmlAccessType.FIELD) @InterfaceAudience.Private public class RowModel implements ProtobufMessageHandler, Serializable { @@ -65,14 +59,14 @@ public class RowModel implements ProtobufMessageHandler, Serializable { private byte[] key; @JsonProperty("Cell") - @XmlElement(name="Cell") + @XmlElement(name = "Cell") private List cells = new ArrayList<>(); - /** * Default constructor */ - public RowModel() { } + public RowModel() { + } /** * Constructor @@ -81,7 +75,7 @@ public RowModel() { } public RowModel(final String key) { this(Bytes.toBytes(key)); } - + /** * Constructor * @param key the row key @@ -99,7 +93,7 @@ public RowModel(final byte[] key) { public RowModel(final String key, final List cells) { this(Bytes.toBytes(key), cells); } - + /** * Constructor * @param key the row key @@ -109,7 +103,7 @@ public RowModel(final byte[] key, final List cells) { this.key = key; this.cells = cells; } - + /** * Adds a cell to the list of cells for this row * @param cell the cell @@ -142,16 +136,13 @@ public List getCells() { @Override public byte[] createProtobufOutput() { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { // there is no standalone row protobuf message - throw new UnsupportedOperationException( - "no protobuf equivalent to RowModel"); + throw new UnsupportedOperationException("no protobuf equivalent to RowModel"); } @Override @@ -166,25 +157,16 @@ public boolean equals(Object obj) { return false; } RowModel rowModel = (RowModel) obj; - return new EqualsBuilder(). - append(key, rowModel.key). - append(cells, rowModel.cells). - isEquals(); + return new EqualsBuilder().append(key, rowModel.key).append(cells, rowModel.cells).isEquals(); } @Override public int hashCode() { - return new HashCodeBuilder(). - append(key). - append(cells). - toHashCode(); + return new HashCodeBuilder().append(key).append(cells).toHashCode(); } @Override public String toString() { - return new ToStringBuilder(this). - append("key", key). - append("cells", cells). - toString(); + return new ToStringBuilder(this).append("key", key).append("cells", cells).toString(); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java index 228d2885ec82..681855444b46 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -16,7 +15,6 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ - package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonInclude; @@ -97,7 +95,7 @@ * </complexType> * */ -@XmlRootElement(name="Scanner") +@XmlRootElement(name = "Scanner") @JsonInclude(JsonInclude.Include.NON_NULL) @InterfaceAudience.Private public class ScannerModel implements ProtobufMessageHandler, Serializable { @@ -117,8 +115,8 @@ public class ScannerModel implements ProtobufMessageHandler, Serializable { private boolean cacheBlocks = true; /** - * Implement lazily-instantiated singleton as per recipe - * here: http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ + * Implement lazily-instantiated singleton as per recipe here: + * http://literatejava.com/jvm/fastest-threadsafe-singleton-jvm/ */ private static class JaxbJsonProviderHolder { static final JacksonJaxbJsonProvider INSTANCE = new JacksonJaxbJsonProvider(); @@ -129,23 +127,22 @@ static class FilterModel { @XmlRootElement static class ByteArrayComparableModel { - @XmlAttribute public String type; - @XmlAttribute public String value; - @XmlAttribute public String op; + @XmlAttribute + public String type; + @XmlAttribute + public String value; + @XmlAttribute + public String op; static enum ComparatorType { - BinaryComparator, - BinaryPrefixComparator, - BitComparator, - NullComparator, - RegexStringComparator, - SubstringComparator + BinaryComparator, BinaryPrefixComparator, BitComparator, NullComparator, + RegexStringComparator, SubstringComparator } - public ByteArrayComparableModel() { } + public ByteArrayComparableModel() { + } - public ByteArrayComparableModel( - ByteArrayComparable comparator) { + public ByteArrayComparableModel(ByteArrayComparable comparator) { String typeName = comparator.getClass().getSimpleName(); ComparatorType type = ComparatorType.valueOf(typeName); this.type = typeName; @@ -156,7 +153,7 @@ public ByteArrayComparableModel( break; case BitComparator: this.value = Bytes.toString(Base64.getEncoder().encode(comparator.getValue())); - this.op = ((BitComparator)comparator).getOperator().toString(); + this.op = ((BitComparator) comparator).getOperator().toString(); break; case NullComparator: break; @@ -201,54 +198,57 @@ public ByteArrayComparable build() { // A grab bag of fields, would have been a union if this were C. // These are null by default and will only be serialized if set (non null). 
- @XmlAttribute public String type; - @XmlAttribute public String op; - @XmlElement ByteArrayComparableModel comparator; - @XmlAttribute public String value; - @XmlElement public List filters; - @XmlAttribute public Integer limit; - @XmlAttribute public Integer offset; - @XmlAttribute public String family; - @XmlAttribute public String qualifier; - @XmlAttribute public Boolean ifMissing; - @XmlAttribute public Boolean latestVersion; - @XmlAttribute public String minColumn; - @XmlAttribute public Boolean minColumnInclusive; - @XmlAttribute public String maxColumn; - @XmlAttribute public Boolean maxColumnInclusive; - @XmlAttribute public Boolean dropDependentColumn; - @XmlAttribute public Float chance; - @XmlElement public List prefixes; - @XmlElement private List ranges; - @XmlElement public List timestamps; + @XmlAttribute + public String type; + @XmlAttribute + public String op; + @XmlElement + ByteArrayComparableModel comparator; + @XmlAttribute + public String value; + @XmlElement + public List filters; + @XmlAttribute + public Integer limit; + @XmlAttribute + public Integer offset; + @XmlAttribute + public String family; + @XmlAttribute + public String qualifier; + @XmlAttribute + public Boolean ifMissing; + @XmlAttribute + public Boolean latestVersion; + @XmlAttribute + public String minColumn; + @XmlAttribute + public Boolean minColumnInclusive; + @XmlAttribute + public String maxColumn; + @XmlAttribute + public Boolean maxColumnInclusive; + @XmlAttribute + public Boolean dropDependentColumn; + @XmlAttribute + public Float chance; + @XmlElement + public List prefixes; + @XmlElement + private List ranges; + @XmlElement + public List timestamps; static enum FilterType { - ColumnCountGetFilter, - ColumnPaginationFilter, - ColumnPrefixFilter, - ColumnRangeFilter, - DependentColumnFilter, - FamilyFilter, - FilterList, - FirstKeyOnlyFilter, - InclusiveStopFilter, - KeyOnlyFilter, - MultipleColumnPrefixFilter, - MultiRowRangeFilter, - PageFilter, - PrefixFilter, - QualifierFilter, - RandomRowFilter, - RowFilter, - SingleColumnValueExcludeFilter, - SingleColumnValueFilter, - SkipFilter, - TimestampsFilter, - ValueFilter, - WhileMatchFilter + ColumnCountGetFilter, ColumnPaginationFilter, ColumnPrefixFilter, ColumnRangeFilter, + DependentColumnFilter, FamilyFilter, FilterList, FirstKeyOnlyFilter, InclusiveStopFilter, + KeyOnlyFilter, MultipleColumnPrefixFilter, MultiRowRangeFilter, PageFilter, PrefixFilter, + QualifierFilter, RandomRowFilter, RowFilter, SingleColumnValueExcludeFilter, + SingleColumnValueFilter, SkipFilter, TimestampsFilter, ValueFilter, WhileMatchFilter } - public FilterModel() { } + public FilterModel() { + } public FilterModel(Filter filter) { String typeName = filter.getClass().getSimpleName(); @@ -256,25 +256,25 @@ public FilterModel(Filter filter) { this.type = typeName; switch (type) { case ColumnCountGetFilter: - this.limit = ((ColumnCountGetFilter)filter).getLimit(); + this.limit = ((ColumnCountGetFilter) filter).getLimit(); break; case ColumnPaginationFilter: - this.limit = ((ColumnPaginationFilter)filter).getLimit(); - this.offset = ((ColumnPaginationFilter)filter).getOffset(); + this.limit = ((ColumnPaginationFilter) filter).getLimit(); + this.offset = ((ColumnPaginationFilter) filter).getOffset(); break; case ColumnPrefixFilter: - byte[] src = ((ColumnPrefixFilter)filter).getPrefix(); + byte[] src = ((ColumnPrefixFilter) filter).getPrefix(); this.value = Bytes.toString(Base64.getEncoder().encode(src)); break; case ColumnRangeFilter: - ColumnRangeFilter crf = 
(ColumnRangeFilter)filter; + ColumnRangeFilter crf = (ColumnRangeFilter) filter; this.minColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMinColumn())); this.minColumnInclusive = crf.getMinColumnInclusive(); this.maxColumn = Bytes.toString(Base64.getEncoder().encode(crf.getMaxColumn())); this.maxColumnInclusive = crf.getMaxColumnInclusive(); break; case DependentColumnFilter: { - DependentColumnFilter dcf = (DependentColumnFilter)filter; + DependentColumnFilter dcf = (DependentColumnFilter) filter; this.family = Bytes.toString(Base64.getEncoder().encode(dcf.getFamily())); byte[] qualifier = dcf.getQualifier(); if (qualifier != null) { @@ -283,11 +283,12 @@ public FilterModel(Filter filter) { this.op = dcf.getOperator().toString(); this.comparator = new ByteArrayComparableModel(dcf.getComparator()); this.dropDependentColumn = dcf.dropDependentColumn(); - } break; + } + break; case FilterList: - this.op = ((FilterList)filter).getOperator().toString(); + this.op = ((FilterList) filter).getOperator().toString(); this.filters = new ArrayList<>(); - for (Filter child: ((FilterList)filter).getFilters()) { + for (Filter child : ((FilterList) filter).getFilters()) { this.filters.add(new FilterModel(child)); } break; @@ -295,40 +296,38 @@ public FilterModel(Filter filter) { case KeyOnlyFilter: break; case InclusiveStopFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((InclusiveStopFilter)filter).getStopRowKey())); + this.value = Bytes + .toString(Base64.getEncoder().encode(((InclusiveStopFilter) filter).getStopRowKey())); break; case MultipleColumnPrefixFilter: this.prefixes = new ArrayList<>(); - for (byte[] prefix: ((MultipleColumnPrefixFilter)filter).getPrefix()) { + for (byte[] prefix : ((MultipleColumnPrefixFilter) filter).getPrefix()) { this.prefixes.add(Bytes.toString(Base64.getEncoder().encode(prefix))); } break; case MultiRowRangeFilter: this.ranges = new ArrayList<>(); - for(RowRange range : ((MultiRowRangeFilter)filter).getRowRanges()) { + for (RowRange range : ((MultiRowRangeFilter) filter).getRowRanges()) { this.ranges.add(new RowRange(range.getStartRow(), range.isStartRowInclusive(), range.getStopRow(), range.isStopRowInclusive())); } break; case PageFilter: - this.value = Long.toString(((PageFilter)filter).getPageSize()); + this.value = Long.toString(((PageFilter) filter).getPageSize()); break; case PrefixFilter: - this.value = Bytes.toString(Base64.getEncoder().encode( - ((PrefixFilter)filter).getPrefix())); + this.value = + Bytes.toString(Base64.getEncoder().encode(((PrefixFilter) filter).getPrefix())); break; case FamilyFilter: case QualifierFilter: case RowFilter: case ValueFilter: - this.op = ((CompareFilter)filter).getOperator().toString(); - this.comparator = - new ByteArrayComparableModel( - ((CompareFilter)filter).getComparator()); + this.op = ((CompareFilter) filter).getOperator().toString(); + this.comparator = new ByteArrayComparableModel(((CompareFilter) filter).getComparator()); break; case RandomRowFilter: - this.chance = ((RandomRowFilter)filter).getChance(); + this.chance = ((RandomRowFilter) filter).getChance(); break; case SingleColumnValueExcludeFilter: case SingleColumnValueFilter: { @@ -339,26 +338,25 @@ public FilterModel(Filter filter) { this.qualifier = Bytes.toString(Base64.getEncoder().encode(qualifier)); } this.op = scvf.getOperator().toString(); - this.comparator = - new ByteArrayComparableModel(scvf.getComparator()); + this.comparator = new ByteArrayComparableModel(scvf.getComparator()); if (scvf.getFilterIfMissing()) { 
this.ifMissing = true; } if (scvf.getLatestVersionOnly()) { this.latestVersion = true; } - } break; + } + break; case SkipFilter: this.filters = new ArrayList<>(); - this.filters.add(new FilterModel(((SkipFilter)filter).getFilter())); + this.filters.add(new FilterModel(((SkipFilter) filter).getFilter())); break; case TimestampsFilter: - this.timestamps = ((TimestampsFilter)filter).getTimestamps(); + this.timestamps = ((TimestampsFilter) filter).getTimestamps(); break; case WhileMatchFilter: this.filters = new ArrayList<>(); - this.filters.add( - new FilterModel(((WhileMatchFilter)filter).getFilter())); + this.filters.add(new FilterModel(((WhileMatchFilter) filter).getFilter())); break; default: throw new RuntimeException("unhandled filter type " + type); @@ -368,105 +366,107 @@ public FilterModel(Filter filter) { public Filter build() { Filter filter; switch (FilterType.valueOf(type)) { - case ColumnCountGetFilter: - filter = new ColumnCountGetFilter(limit); - break; - case ColumnPaginationFilter: - filter = new ColumnPaginationFilter(limit, offset); - break; - case ColumnPrefixFilter: - filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); - break; - case ColumnRangeFilter: - filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), - minColumnInclusive, Base64.getDecoder().decode(maxColumn), - maxColumnInclusive); - break; - case DependentColumnFilter: - filter = new DependentColumnFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - dropDependentColumn, CompareOperator.valueOf(op), comparator.build()); - break; - case FamilyFilter: - filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case FilterList: { - List list = new ArrayList<>(filters.size()); - for (FilterModel model: filters) { - list.add(model.build()); - } - filter = new FilterList(FilterList.Operator.valueOf(op), list); - } break; - case FirstKeyOnlyFilter: - filter = new FirstKeyOnlyFilter(); - break; - case InclusiveStopFilter: - filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); - break; - case KeyOnlyFilter: - filter = new KeyOnlyFilter(); - break; - case MultipleColumnPrefixFilter: { - byte[][] values = new byte[prefixes.size()][]; - for (int i = 0; i < prefixes.size(); i++) { - values[i] = Base64.getDecoder().decode(prefixes.get(i)); - } - filter = new MultipleColumnPrefixFilter(values); - } break; - case MultiRowRangeFilter: { - filter = new MultiRowRangeFilter(ranges); - } break; - case PageFilter: - filter = new PageFilter(Long.parseLong(value)); - break; - case PrefixFilter: - filter = new PrefixFilter(Base64.getDecoder().decode(value)); - break; - case QualifierFilter: - filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case RandomRowFilter: - filter = new RandomRowFilter(chance); - break; - case RowFilter: - filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case SingleColumnValueFilter: - filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), - qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueFilter)filter).setFilterIfMissing(ifMissing); - } - if (latestVersion != null) { - ((SingleColumnValueFilter)filter).setLatestVersionOnly(latestVersion); + case ColumnCountGetFilter: + filter = new ColumnCountGetFilter(limit); + break; + case ColumnPaginationFilter: + filter = new ColumnPaginationFilter(limit, offset); + break; + case ColumnPrefixFilter: + filter = new ColumnPrefixFilter(Base64.getDecoder().decode(value)); + break; + case ColumnRangeFilter: + filter = new ColumnRangeFilter(Base64.getDecoder().decode(minColumn), minColumnInclusive, + Base64.getDecoder().decode(maxColumn), maxColumnInclusive); + break; + case DependentColumnFilter: + filter = new DependentColumnFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, dropDependentColumn, + CompareOperator.valueOf(op), comparator.build()); + break; + case FamilyFilter: + filter = new FamilyFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case FilterList: { + List list = new ArrayList<>(filters.size()); + for (FilterModel model : filters) { + list.add(model.build()); + } + filter = new FilterList(FilterList.Operator.valueOf(op), list); } - break; - case SingleColumnValueExcludeFilter: - filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), - qualifier != null ? Base64.getDecoder().decode(qualifier) : null, - CompareOperator.valueOf(op), comparator.build()); - if (ifMissing != null) { - ((SingleColumnValueExcludeFilter)filter).setFilterIfMissing(ifMissing); + break; + case FirstKeyOnlyFilter: + filter = new FirstKeyOnlyFilter(); + break; + case InclusiveStopFilter: + filter = new InclusiveStopFilter(Base64.getDecoder().decode(value)); + break; + case KeyOnlyFilter: + filter = new KeyOnlyFilter(); + break; + case MultipleColumnPrefixFilter: { + byte[][] values = new byte[prefixes.size()][]; + for (int i = 0; i < prefixes.size(); i++) { + values[i] = Base64.getDecoder().decode(prefixes.get(i)); + } + filter = new MultipleColumnPrefixFilter(values); } - if (latestVersion != null) { - ((SingleColumnValueExcludeFilter)filter).setLatestVersionOnly(latestVersion); + break; + case MultiRowRangeFilter: { + filter = new MultiRowRangeFilter(ranges); } - break; - case SkipFilter: - filter = new SkipFilter(filters.get(0).build()); - break; - case TimestampsFilter: - filter = new TimestampsFilter(timestamps); - break; - case ValueFilter: - filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); - break; - case WhileMatchFilter: - filter = new WhileMatchFilter(filters.get(0).build()); - break; - default: - throw new RuntimeException("unhandled filter type: " + type); + break; + case PageFilter: + filter = new PageFilter(Long.parseLong(value)); + break; + case PrefixFilter: + filter = new PrefixFilter(Base64.getDecoder().decode(value)); + break; + case QualifierFilter: + filter = new QualifierFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case RandomRowFilter: + filter = new RandomRowFilter(chance); + break; + case RowFilter: + filter = new RowFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case SingleColumnValueFilter: + filter = new SingleColumnValueFilter(Base64.getDecoder().decode(family), + qualifier != null ? 
Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SingleColumnValueExcludeFilter: + filter = new SingleColumnValueExcludeFilter(Base64.getDecoder().decode(family), + qualifier != null ? Base64.getDecoder().decode(qualifier) : null, + CompareOperator.valueOf(op), comparator.build()); + if (ifMissing != null) { + ((SingleColumnValueExcludeFilter) filter).setFilterIfMissing(ifMissing); + } + if (latestVersion != null) { + ((SingleColumnValueExcludeFilter) filter).setLatestVersionOnly(latestVersion); + } + break; + case SkipFilter: + filter = new SkipFilter(filters.get(0).build()); + break; + case TimestampsFilter: + filter = new TimestampsFilter(timestamps); + break; + case ValueFilter: + filter = new ValueFilter(CompareOperator.valueOf(op), comparator.build()); + break; + case WhileMatchFilter: + filter = new WhileMatchFilter(filters.get(0).build()); + break; + default: + throw new RuntimeException("unhandled filter type: " + type); } return filter; } @@ -475,7 +475,6 @@ public Filter build() { /** * Get the JacksonJaxbJsonProvider instance; - * * @return A JacksonJaxbJsonProvider. */ private static JacksonJaxbJsonProvider getJasonProvider() { @@ -488,8 +487,9 @@ private static JacksonJaxbJsonProvider getJasonProvider() { * @throws Exception */ public static Filter buildFilter(String s) throws Exception { - FilterModel model = getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).readValue(s, FilterModel.class); + FilterModel model = + getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .readValue(s, FilterModel.class); return model.build(); } @@ -499,8 +499,8 @@ public static Filter buildFilter(String s) throws Exception { * @throws Exception */ public static String stringifyFilter(final Filter filter) throws Exception { - return getJasonProvider().locateMapper(FilterModel.class, - MediaType.APPLICATION_JSON_TYPE).writeValueAsString(new FilterModel(filter)); + return getJasonProvider().locateMapper(FilterModel.class, MediaType.APPLICATION_JSON_TYPE) + .writeValueAsString(new FilterModel(filter)); } private static final byte[] COLUMN_DIVIDER = Bytes.toBytes(":"); @@ -513,11 +513,11 @@ public static ScannerModel fromScan(Scan scan) throws Exception { ScannerModel model = new ScannerModel(); model.setStartRow(scan.getStartRow()); model.setEndRow(scan.getStopRow()); - Map> families = scan.getFamilyMap(); + Map> families = scan.getFamilyMap(); if (families != null) { - for (Map.Entry> entry : families.entrySet()) { + for (Map.Entry> entry : families.entrySet()) { if (entry.getValue() != null) { - for (byte[] qualifier: entry.getValue()) { + for (byte[] qualifier : entry.getValue()) { model.addColumn(Bytes.add(entry.getKey(), COLUMN_DIVIDER, qualifier)); } } else { @@ -557,7 +557,8 @@ public static ScannerModel fromScan(Scan scan) throws Exception { /** * Default constructor */ - public ScannerModel() {} + public ScannerModel() { + } /** * Constructor @@ -568,11 +569,10 @@ public ScannerModel() {} * @param caching the number of rows that the scanner will fetch at once * @param endTime the upper bound on timestamps of values of interest * @param maxVersions the maximum number of versions to return - * @param filter a filter specification - * (values with timestamps 
later than this are excluded) + * @param filter a filter specification (values with timestamps later than this are excluded) */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long endTime, int maxVersions, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long endTime, int maxVersions, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -591,14 +591,14 @@ public ScannerModel(byte[] startRow, byte[] endRow, List columns, * @param columns the columns to scan * @param batch the number of values to return in batch * @param caching the number of rows that the scanner will fetch at once - * @param startTime the lower bound on timestamps of values of interest - * (values with timestamps earlier than this are excluded) - * @param endTime the upper bound on timestamps of values of interest - * (values with timestamps later than this are excluded) + * @param startTime the lower bound on timestamps of values of interest (values with timestamps + * earlier than this are excluded) + * @param endTime the upper bound on timestamps of values of interest (values with timestamps + * later than this are excluded) * @param filter a filter specification */ - public ScannerModel(byte[] startRow, byte[] endRow, List columns, - int batch, int caching, long startTime, long endTime, String filter) { + public ScannerModel(byte[] startRow, byte[] endRow, List columns, int batch, int caching, + long startTime, long endTime, String filter) { super(); this.startRow = startRow; this.endRow = endRow; @@ -624,6 +624,7 @@ public void addColumn(byte[] column) { public void addLabel(String label) { labels.add(label); } + /** * @return true if a start row was specified */ @@ -657,12 +658,12 @@ public byte[] getEndRow() { /** * @return list of columns of interest in column:qualifier format, or empty for all */ - @XmlElement(name="column") + @XmlElement(name = "column") public List getColumns() { return columns; } - @XmlElement(name="labels") + @XmlElement(name = "labels") public List getLabels() { return labels; } @@ -759,7 +760,8 @@ public void setCaching(int caching) { } /** - * @param value true if HFile blocks should be cached on the servers for this scan, false otherwise + * @param value true if HFile blocks should be cached on the servers for this scan, false + * otherwise */ public void setCacheBlocks(boolean value) { this.cacheBlocks = value; @@ -802,7 +804,7 @@ public byte[] createProtobufOutput() { if (!Bytes.equals(endRow, HConstants.EMPTY_START_ROW)) { builder.setEndRow(ByteStringer.wrap(endRow)); } - for (byte[] column: columns) { + for (byte[] column : columns) { builder.addColumns(ByteStringer.wrap(column)); } if (startTime != 0) { @@ -828,8 +830,7 @@ public byte[] createProtobufOutput() { } @Override - public ProtobufMessageHandler getObjectFromMessage(byte[] message) - throws IOException { + public ProtobufMessageHandler getObjectFromMessage(byte[] message) throws IOException { Scanner.Builder builder = Scanner.newBuilder(); ProtobufUtil.mergeFrom(builder, message); if (builder.hasStartRow()) { @@ -838,7 +839,7 @@ public ProtobufMessageHandler getObjectFromMessage(byte[] message) if (builder.hasEndRow()) { endRow = builder.getEndRow().toByteArray(); } - for (ByteString column: builder.getColumnsList()) { + for (ByteString column : builder.getColumnsList()) { addColumn(column.toByteArray()); } if (builder.hasBatch()) { @@ -861,7 +862,7 @@ public ProtobufMessageHandler 
getObjectFromMessage(byte[] message) } if (builder.getLabelsList() != null) { List labels = builder.getLabelsList(); - for(String label : labels) { + for (String label : labels) { addLabel(label); } } diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java index 4f4276a190a1..c742ccc94d25 100644 --- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java +++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/StorageClusterStatusModel.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -19,23 +18,19 @@ package org.apache.hadoop.hbase.rest.model; import com.fasterxml.jackson.annotation.JsonProperty; - import java.io.IOException; import java.io.Serializable; import java.util.ArrayList; import java.util.List; - import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlElementWrapper; import javax.xml.bind.annotation.XmlRootElement; - import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.rest.ProtobufMessageHandler; import org.apache.hadoop.hbase.rest.protobuf.generated.StorageClusterStatusMessage.StorageClusterStatus; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; - import org.apache.yetus.audience.InterfaceAudience; /** @@ -43,8 +38,8 @@ *

    *
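// Aside, not part of the patch: an illustrative sketch of the ScannerModel helpers reformatted
// above (fromScan, stringifyFilter, buildFilter), using standard client Scan and filter classes.
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.rest.model.ScannerModel;
import org.apache.hadoop.hbase.util.Bytes;

public class ScannerModelSketch {
  public static void main(String[] args) throws Exception {
    Scan scan = new Scan();
    scan.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"));
    scan.setFilter(new PrefixFilter(Bytes.toBytes("row")));
    // Convert the client-side Scan into the REST scanner representation.
    ScannerModel model = ScannerModel.fromScan(scan);
    // Filters round-trip through the JSON form used by the REST scanner resource.
    String json = ScannerModel.stringifyFilter(new PrefixFilter(Bytes.toBytes("row")));
    Filter rebuilt = ScannerModel.buildFilter(json);
    System.out.println(json);
    System.out.println(rebuilt.getClass().getSimpleName());
  }
}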